diff --git a/.gitattributes b/.gitattributes
index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..86a6d6ae4d7df92a6570ac3da69a0e4c97f02ab0 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -57,3 +57,289 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+tags_astropy__astropy-12907.json filter=lfs diff=lfs merge=lfs -text
+tags_astropy__astropy-14182.json filter=lfs diff=lfs merge=lfs -text
+tags_astropy__astropy-14365.json filter=lfs diff=lfs merge=lfs -text
+tags_astropy__astropy-14995.json filter=lfs diff=lfs merge=lfs -text
+tags_astropy__astropy-6938.json filter=lfs diff=lfs merge=lfs -text
+tags_astropy__astropy-7746.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-10914.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-10924.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11001.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11019.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11039.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11049.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11099.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11133.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11179.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11283.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11422.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11564.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11583.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11620.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11630.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11742.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11797.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11815.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11848.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11905.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11910.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11964.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-11999.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12113.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12125.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12184.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12284.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12286.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12308.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12453.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12470.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12497.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12589.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12700.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12708.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12747.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12856.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12908.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12915.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-12983.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13028.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13033.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13158.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13220.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13230.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13265.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13315.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13321.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13401.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13447.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13448.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13551.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13590.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13658.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13660.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13710.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13757.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13768.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13925.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13933.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-13964.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14016.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14017.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14155.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14238.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14382.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14411.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14534.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14580.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14608.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14667.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14672.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14730.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14752.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14787.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14855.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14915.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14997.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-14999.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15061.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15202.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15213.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15252.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15320.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15347.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15388.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15400.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15498.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15695.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15738.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15781.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15789.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15790.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15814.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15819.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15851.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15902.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-15996.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16041.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16046.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16139.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16229.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16255.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16379.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16400.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16408.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16527.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16595.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16816.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16820.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16873.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-16910.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-17051.json filter=lfs diff=lfs merge=lfs -text
+tags_django__django-17087.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-18869.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-22711.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-22835.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23299.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23314.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23476.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23562.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23563.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23913.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23964.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-23987.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-24149.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-24265.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-24334.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-24970.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-25079.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-25311.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-25332.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-25433.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-25442.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-25498.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-26011.json filter=lfs diff=lfs merge=lfs -text
+tags_matplotlib__matplotlib-26020.json filter=lfs diff=lfs merge=lfs -text
+tags_pydata__xarray-3364.json filter=lfs diff=lfs merge=lfs -text
+tags_pydata__xarray-4094.json filter=lfs diff=lfs merge=lfs -text
+tags_pydata__xarray-4248.json filter=lfs diff=lfs merge=lfs -text
+tags_pydata__xarray-4493.json filter=lfs diff=lfs merge=lfs -text
+tags_pydata__xarray-5131.json filter=lfs diff=lfs merge=lfs -text
+tags_pylint-dev__pylint-5859.json filter=lfs diff=lfs merge=lfs -text
+tags_pylint-dev__pylint-6506.json filter=lfs diff=lfs merge=lfs -text
+tags_pylint-dev__pylint-7080.json filter=lfs diff=lfs merge=lfs -text
+tags_pylint-dev__pylint-7114.json filter=lfs diff=lfs merge=lfs -text
+tags_pylint-dev__pylint-7228.json filter=lfs diff=lfs merge=lfs -text
+tags_pylint-dev__pylint-7993.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-11143.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-11148.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-5103.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-5221.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-5227.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-5413.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-5495.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-5692.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-6116.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-7168.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-7220.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-7373.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-7432.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-7490.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-8365.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-8906.json filter=lfs diff=lfs merge=lfs -text
+tags_pytest-dev__pytest-9359.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-10297.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-10949.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-11040.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-11281.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-12471.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13142.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13241.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13439.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13496.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13497.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13584.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-13779.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-14087.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-14092.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-14894.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-14983.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-15512.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-15535.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-25500.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-25570.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-25638.json filter=lfs diff=lfs merge=lfs -text
+tags_scikit-learn__scikit-learn-25747.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-10325.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-10451.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-11445.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-7686.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-7738.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-7975.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8273.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8282.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8435.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8474.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8506.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8595.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8627.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8713.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8721.json filter=lfs diff=lfs merge=lfs -text
+tags_sphinx-doc__sphinx-8801.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-11400.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-11870.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-11897.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-12171.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-12236.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-12419.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-12454.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-12481.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13031.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13043.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13146.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13177.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13437.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13471.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13480.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13647.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13773.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13895.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13915.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-13971.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-14024.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-14308.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-14317.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-14396.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-14774.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-14817.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-15011.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-15308.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-15345.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-15346.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-15609.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-15678.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-16106.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-16281.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-16503.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-16792.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-16988.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-17022.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-17139.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-17630.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-17655.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18057.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18087.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18189.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18199.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18532.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18621.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18698.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-18835.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-19007.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-19254.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-19487.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20049.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20154.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20212.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20322.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20442.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20590.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-20639.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21055.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21171.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21379.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21612.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21614.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21627.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-21847.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-22005.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-22714.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-22840.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-23117.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-23191.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-23262.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-24066.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-24102.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-24152.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-24213.json filter=lfs diff=lfs merge=lfs -text
+tags_sympy__sympy-24909.json filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..785ababbca057cff4340d44e97aa216871590631
--- /dev/null
+++ b/README.md
@@ -0,0 +1 @@
+This repo contains cached trajectories of [RepoGraph](https://github.com/ozyyshr/RepoGraph).
diff --git a/tags_astropy__astropy-12907.json b/tags_astropy__astropy-12907.json
new file mode 100644
index 0000000000000000000000000000000000000000..f43ef503bad4e6384a9d1ff394e6e85b8ccf614e
--- /dev/null
+++ b/tags_astropy__astropy-12907.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:07c78dfa28a7c8e8f8506988231e9c0a33bce7536d62c309ed377d543edff548
+size 53094900
diff --git a/tags_astropy__astropy-14182.json b/tags_astropy__astropy-14182.json
new file mode 100644
index 0000000000000000000000000000000000000000..6c694c0db8d2677a5c1dfe462b3f2f45957a8da0
--- /dev/null
+++ b/tags_astropy__astropy-14182.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:344b5e80ec288eaee4689e750d1ac071b638a285c0062a7c55fa6f73eb564e06
+size 53818497
diff --git a/tags_astropy__astropy-14365.json b/tags_astropy__astropy-14365.json
new file mode 100644
index 0000000000000000000000000000000000000000..40783be028bc938e76d0359162fb498d684ec891
--- /dev/null
+++ b/tags_astropy__astropy-14365.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:006793816876018dbd16be5a3a3fac79169858cdb9c26cad28eab36de1af381b
+size 54016450
diff --git a/tags_astropy__astropy-14995.json b/tags_astropy__astropy-14995.json
new file mode 100644
index 0000000000000000000000000000000000000000..975f61501e52ee9c9bc219ae27d955f4282f814d
--- /dev/null
+++ b/tags_astropy__astropy-14995.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4ac6a8e1fb03e390faeb4ea0dfd11848551b5e8436176d3f1eee54fd423fb5b
+size 53566519
diff --git a/tags_astropy__astropy-6938.json b/tags_astropy__astropy-6938.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f6310bd6a93ad9c1bc01289000f5b0823aec746
--- /dev/null
+++ b/tags_astropy__astropy-6938.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c94adfd4cf20906651a9a0625dada44f5c4531c4bd1a69891ef4cb585beb806c
+size 36295830
diff --git a/tags_astropy__astropy-7746.json b/tags_astropy__astropy-7746.json
new file mode 100644
index 0000000000000000000000000000000000000000..b5c8a2533b6501e9b4d5dd64f0701de5e634e998
--- /dev/null
+++ b/tags_astropy__astropy-7746.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4678caf352cfee18cf825a98b6139bdfe1856ffe27824b2cc8eb60156512da1e
+size 39115607
diff --git a/tags_django__django-10914.json b/tags_django__django-10914.json
new file mode 100644
index 0000000000000000000000000000000000000000..0231600154f8b2b69c582bd947af5fab67f342d8
--- /dev/null
+++ b/tags_django__django-10914.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:01ef4e99f01d5cd3dde9acf256f7f4eac86d97668d9eaca9753d5ed5da863ab9
+size 36566401
diff --git a/tags_django__django-10924.json b/tags_django__django-10924.json
new file mode 100644
index 0000000000000000000000000000000000000000..81f4a9cb076b448cc3e860bcd5f77da5ee9337ef
--- /dev/null
+++ b/tags_django__django-10924.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dd238e2a9ae1810ffe07d098323b84287a3830783898ab325546d534ad5507e
+size 36939714
diff --git a/tags_django__django-11001.json b/tags_django__django-11001.json
new file mode 100644
index 0000000000000000000000000000000000000000..9eb31673b745d5ea5643f01c7bf3a82f3464e60c
--- /dev/null
+++ b/tags_django__django-11001.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5189a6f027caabf79f96728f07c498fadba7a157a7e454edfcc6ea4bf9587a8
+size 36940064
diff --git a/tags_django__django-11019.json b/tags_django__django-11019.json
new file mode 100644
index 0000000000000000000000000000000000000000..3280e9c8b80097859f8f31be8ef0614cb0efa085
--- /dev/null
+++ b/tags_django__django-11019.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:59549f969a71ef14d05b27426767151a69eb65b039270ce54234a05f79bec8ea
+size 36635009
diff --git a/tags_django__django-11039.json b/tags_django__django-11039.json
new file mode 100644
index 0000000000000000000000000000000000000000..7fc6116f97949f9a7eeffcdc5ebe966d4ec5c1c2
--- /dev/null
+++ b/tags_django__django-11039.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14ac36fb8aaa4affe4a6db1338343c094edf2d4493f2e8f95d28df36219ed0b3
+size 36685560
diff --git a/tags_django__django-11049.json b/tags_django__django-11049.json
new file mode 100644
index 0000000000000000000000000000000000000000..c10c442faa343bc263a29bb971464fdb5421a67e
--- /dev/null
+++ b/tags_django__django-11049.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4ae013218e419aa157e4d20ee43dde36bea87579f57ede20a2c0d368cd92d3f
+size 36684517
diff --git a/tags_django__django-11099.json b/tags_django__django-11099.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3c636812be1163ecbfe77e6bc25a8fe8da7b7e6
--- /dev/null
+++ b/tags_django__django-11099.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16849fff2bfe522e2d1dbf44f1ebd70b9840f421f7446646cf40a29fc09299e9
+size 36836406
diff --git a/tags_django__django-11133.json b/tags_django__django-11133.json
new file mode 100644
index 0000000000000000000000000000000000000000..fa0f23b6309e49d283e564ffb35954a795af5d33
--- /dev/null
+++ b/tags_django__django-11133.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:324a363721dd46e440a9e51114773d778a4a61d5cde303ea389d0cef82a43951
+size 36894953
diff --git a/tags_django__django-11179.json b/tags_django__django-11179.json
new file mode 100644
index 0000000000000000000000000000000000000000..3fccd1faba0e52b4d2a9afe43afe36f8b7a87872
--- /dev/null
+++ b/tags_django__django-11179.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f27f4ec51b8bd30313292d68ee38ddf4f75f585ba6a16c3b3d1b2b04a200d9f
+size 36909974
diff --git a/tags_django__django-11283.json b/tags_django__django-11283.json
new file mode 100644
index 0000000000000000000000000000000000000000..10ce723641d651124d2683338399d41e5f36f07e
--- /dev/null
+++ b/tags_django__django-11283.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5881c143a3acf500f28ac6ccd5ed0faa665a02a372546f94a6eb2b5a7ca7af67
+size 36930804
diff --git a/tags_django__django-11422.json b/tags_django__django-11422.json
new file mode 100644
index 0000000000000000000000000000000000000000..a2451c45e0b1d05b3918714ee6dbd73e1e80ad02
--- /dev/null
+++ b/tags_django__django-11422.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aed29a249c3f65961a7e071b8e375d982868fe40e7fe0c4f1d42203cc4ea165
+size 37016761
diff --git a/tags_django__django-11564.json b/tags_django__django-11564.json
new file mode 100644
index 0000000000000000000000000000000000000000..e689ded9f150e0fa13d3d24711f7f0d5cb086301
--- /dev/null
+++ b/tags_django__django-11564.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88bad09c18ef82b113f2131016c555f8d23f516d237558a196b129a314223903
+size 37942981
diff --git a/tags_django__django-11583.json b/tags_django__django-11583.json
new file mode 100644
index 0000000000000000000000000000000000000000..2bcd064db0116f0fe160f026ba9b8104f6133987
--- /dev/null
+++ b/tags_django__django-11583.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:189015ea8379b46966702c493b11a856a2a06df27d985d21e1bdcde85d889e1e
+size 37381691
diff --git a/tags_django__django-11620.json b/tags_django__django-11620.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f2485faf3941da8d1c29c4c9f7ee8ac1f76f75c
--- /dev/null
+++ b/tags_django__django-11620.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:407c2a28769a5c9e85b77c7a348dfaf9558bf5de183691bc78fbc001f585676f
+size 37521547
diff --git a/tags_django__django-11630.json b/tags_django__django-11630.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed29647698451642bf7396f4a9194ba971e61d24
--- /dev/null
+++ b/tags_django__django-11630.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5db9a8d60a84c15e8b6862a3066c991136841ce4d2447aaecfd8e47c18b5db5c
+size 37521939
diff --git a/tags_django__django-11742.json b/tags_django__django-11742.json
new file mode 100644
index 0000000000000000000000000000000000000000..900107ccc7901246fecd953252e7b7ba1ca844e8
--- /dev/null
+++ b/tags_django__django-11742.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a774a5ca73c73c7ec787c77b743a9d84bfca2a9975c618f01b07db472cf5022a
+size 37844679
diff --git a/tags_django__django-11797.json b/tags_django__django-11797.json
new file mode 100644
index 0000000000000000000000000000000000000000..cfcad48d67d8ac2c23f11f9de3b57d41fcccd179
--- /dev/null
+++ b/tags_django__django-11797.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:553820b79f2577f6addd3354ac5175e6565a3b9a0889e91dadec175a5720167f
+size 37918921
diff --git a/tags_django__django-11815.json b/tags_django__django-11815.json
new file mode 100644
index 0000000000000000000000000000000000000000..97a93417e3407b34e16980293bdc8f28186378e8
--- /dev/null
+++ b/tags_django__django-11815.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d131e6799f9eddf1d9724e972333d4f2d76c4f30c8956d3732c8bff9de4d6ba0
+size 37962665
diff --git a/tags_django__django-11848.json b/tags_django__django-11848.json
new file mode 100644
index 0000000000000000000000000000000000000000..183569b36ec330c71d1dfdf90b71c5944da35e4b
--- /dev/null
+++ b/tags_django__django-11848.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fcfe87712364b95969c3ed372db83e16476bd75d678fb0e25eff340b45c1880
+size 37962016
diff --git a/tags_django__django-11905.json b/tags_django__django-11905.json
new file mode 100644
index 0000000000000000000000000000000000000000..80573f189fb07f48c400a2164ab0e2f2483ad4e1
--- /dev/null
+++ b/tags_django__django-11905.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c7daf06a856bb3c981e68e6d28279da59bd380fdaefd16e1394af70254747214
+size 38002341
diff --git a/tags_django__django-11910.json b/tags_django__django-11910.json
new file mode 100644
index 0000000000000000000000000000000000000000..311fe0c853f94d8c24f90699d8d5429de13f908b
--- /dev/null
+++ b/tags_django__django-11910.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:924d99d5c13d58ff548812de41aa9bebb44a5dc4608e1003d3dc2bae1772909a
+size 37980520
diff --git a/tags_django__django-11964.json b/tags_django__django-11964.json
new file mode 100644
index 0000000000000000000000000000000000000000..f160e4c724c0cb21557e9eaf6c8e89885230a878
--- /dev/null
+++ b/tags_django__django-11964.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a1668c750265670de75ea17f34c8405bb6f00ce250f77cde94e723320eef8e0
+size 38172311
diff --git a/tags_django__django-11999.json b/tags_django__django-11999.json
new file mode 100644
index 0000000000000000000000000000000000000000..729447cb66079d1909bb5cb8bab13c76f814bc84
--- /dev/null
+++ b/tags_django__django-11999.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25d5311794732db66b331d39deb6186faff9432fe1c7b11e4dfb366e5f23bdd8
+size 38223520
diff --git a/tags_django__django-12113.json b/tags_django__django-12113.json
new file mode 100644
index 0000000000000000000000000000000000000000..e12176e1a70b205dae4e357dc04e2c8faf06ce94
--- /dev/null
+++ b/tags_django__django-12113.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ede79b232a5b0669c5e9823dca40973ccab4a83213eee5a032b6b7273e5683d8
+size 38368436
diff --git a/tags_django__django-12125.json b/tags_django__django-12125.json
new file mode 100644
index 0000000000000000000000000000000000000000..507da2a95feb9c9932b88641ae8d6145cb0af2a1
--- /dev/null
+++ b/tags_django__django-12125.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e4dc529ca5e32fadfbf7960e809493a82a509ed532f7184b446ba2f9d28bf3c
+size 38375895
diff --git a/tags_django__django-12184.json b/tags_django__django-12184.json
new file mode 100644
index 0000000000000000000000000000000000000000..0f0cc875a3e634cb281dc48d57c13e5bdcdeac89
--- /dev/null
+++ b/tags_django__django-12184.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b71134c13bf715a7eec2d90c499e027aeca968ee0e531a667bf4b641475b9e2
+size 38461583
diff --git a/tags_django__django-12284.json b/tags_django__django-12284.json
new file mode 100644
index 0000000000000000000000000000000000000000..c619403aed51a82defc8c22c711887883a33deaa
--- /dev/null
+++ b/tags_django__django-12284.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:67d0aec181eae64c5a7f1daa9da9db170feca388c3c5babaaab5ea019cf6a736
+size 38654132
diff --git a/tags_django__django-12286.json b/tags_django__django-12286.json
new file mode 100644
index 0000000000000000000000000000000000000000..5959ad511cf650c9e61bd574ce75540f6ac92c09
--- /dev/null
+++ b/tags_django__django-12286.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce450c116b65b664132b55d43dadbfd7e45b2a69e1ade31d6c58e55a62bbf551
+size 38609065
diff --git a/tags_django__django-12308.json b/tags_django__django-12308.json
new file mode 100644
index 0000000000000000000000000000000000000000..f41bd16a4cb6acf35c903b8435cad5dea82cee40
--- /dev/null
+++ b/tags_django__django-12308.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39eb62d7b0e065df39d511d8f660f64cd425dd7ca1a3b91e73f72a7e3d0a5d8e
+size 39507329
diff --git a/tags_django__django-12453.json b/tags_django__django-12453.json
new file mode 100644
index 0000000000000000000000000000000000000000..812f2db4e0ca7a1a70e72677975d4939fde66274
--- /dev/null
+++ b/tags_django__django-12453.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ca6a30dc5a9806abe4eecc6a7e40df506c4c25e663c7b0078bd9188adcca2544
+size 38763844
diff --git a/tags_django__django-12470.json b/tags_django__django-12470.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b723098d356149201ed424e73aadb489e3fdb5b
--- /dev/null
+++ b/tags_django__django-12470.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc6f6316859c18490c5843e64a93d3d69f1e44888a92554cf564310626aa304b
+size 38915583
diff --git a/tags_django__django-12497.json b/tags_django__django-12497.json
new file mode 100644
index 0000000000000000000000000000000000000000..de435ce54aaf5508b9cb25799d9cf81cdc508bcf
--- /dev/null
+++ b/tags_django__django-12497.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c10662cc8c97a1c3beba61f3da1da545bd9cb4a9cefe7940f716e5041a35e41a
+size 38803459
diff --git a/tags_django__django-12589.json b/tags_django__django-12589.json
new file mode 100644
index 0000000000000000000000000000000000000000..e28db14f5d27efaa1defae7be4172b60a17cf762
--- /dev/null
+++ b/tags_django__django-12589.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04a4ed0ff32bf7e6dcb3a7b6fd76dcccb4b1fdc9b4164789082c3b37efceed8b
+size 38925681
diff --git a/tags_django__django-12700.json b/tags_django__django-12700.json
new file mode 100644
index 0000000000000000000000000000000000000000..adeb9466a3c4be3902e77c0656b19620f5f5f0f3
--- /dev/null
+++ b/tags_django__django-12700.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b0cc86bfcfd09e33bdac86e49dac6c50481e5e300102dd8dd754c5370147afb
+size 39197056
diff --git a/tags_django__django-12708.json b/tags_django__django-12708.json
new file mode 100644
index 0000000000000000000000000000000000000000..8ae346ccbd539778db2b455a4ce029ede01af880
--- /dev/null
+++ b/tags_django__django-12708.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e566729a9c21524c19f797161918b29386a9a920c8414236fccb577b57f4158
+size 39251229
diff --git a/tags_django__django-12747.json b/tags_django__django-12747.json
new file mode 100644
index 0000000000000000000000000000000000000000..9ceb0a622a8106a7abc7992714d195cf53832aad
--- /dev/null
+++ b/tags_django__django-12747.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d88c91aeb48eafaa1d1b2a9b2f6d8b1227d5b16d57183976588d036bfd34f44d
+size 39209287
diff --git a/tags_django__django-12856.json b/tags_django__django-12856.json
new file mode 100644
index 0000000000000000000000000000000000000000..c2a20a311520096e90e82e600f5a1ae61fc8ebfe
--- /dev/null
+++ b/tags_django__django-12856.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57544cd446b533e490bffd5a82c0583baed2ac73adb14a861819f9689c2c0d8e
+size 39518143
diff --git a/tags_django__django-12908.json b/tags_django__django-12908.json
new file mode 100644
index 0000000000000000000000000000000000000000..98e218c06bb212e6dc711b3fc088cff7ac4b373e
--- /dev/null
+++ b/tags_django__django-12908.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62a28575c99410a1d420d14d58ea4623158136d2b03712899a5162e0e94de929
+size 39493891
diff --git a/tags_django__django-12915.json b/tags_django__django-12915.json
new file mode 100644
index 0000000000000000000000000000000000000000..0dc5b7a5898a853a2be9f81fce1de0282316b15e
--- /dev/null
+++ b/tags_django__django-12915.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d5dd713573447f0871864a6c625abf3c44e8e9f010b4e4f0be179004a0e4745
+size 39784443
diff --git a/tags_django__django-12983.json b/tags_django__django-12983.json
new file mode 100644
index 0000000000000000000000000000000000000000..da430fee835c49c2f1b41c7297e0cb7cb21a021a
--- /dev/null
+++ b/tags_django__django-12983.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6ea850e948897293d3ecda18b1d3099a2ff60529d1899e549a7de9d572a1594
+size 39650663
diff --git a/tags_django__django-13028.json b/tags_django__django-13028.json
new file mode 100644
index 0000000000000000000000000000000000000000..2103c2b162b76732a3dc01a39628c23499f7638b
--- /dev/null
+++ b/tags_django__django-13028.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4111bd80aaf8e53345cc6cea1829598a94f0b1c0cce068683160a0f4c5a484ad
+size 39781128
diff --git a/tags_django__django-13033.json b/tags_django__django-13033.json
new file mode 100644
index 0000000000000000000000000000000000000000..f25e60984174915f5ef1deda8c35f0a615c0a084
--- /dev/null
+++ b/tags_django__django-13033.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fb6cb0bcfcddec57b8e93839ebc118ff651485c67bb503c47023182f0cb45e1
+size 39788179
diff --git a/tags_django__django-13158.json b/tags_django__django-13158.json
new file mode 100644
index 0000000000000000000000000000000000000000..538e451d8b83137ed0c269233117e7302c4c1cdd
--- /dev/null
+++ b/tags_django__django-13158.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16d5fa19f6db3b6eeb3bc20a340e62643f8d4d16924c1cc79b196b6e07dc91a6
+size 39936979
diff --git a/tags_django__django-13220.json b/tags_django__django-13220.json
new file mode 100644
index 0000000000000000000000000000000000000000..ce03bbda4f790d73fc0b8ca72d92b4f4253e9faa
--- /dev/null
+++ b/tags_django__django-13220.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8770171710614f2f364ddb34450a4937b60b9302136a7c59d85a7e00605e9230
+size 40023807
diff --git a/tags_django__django-13230.json b/tags_django__django-13230.json
new file mode 100644
index 0000000000000000000000000000000000000000..fa42f538614f1f041086c133ddb20d1a25e7715e
--- /dev/null
+++ b/tags_django__django-13230.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2033634884a2236c9b98b7ceb443c04cd3cc6e199e3d8d1851989544a7353505
+size 40026424
diff --git a/tags_django__django-13265.json b/tags_django__django-13265.json
new file mode 100644
index 0000000000000000000000000000000000000000..5331bec1f353ed6a15a5dd7406aee617368f7dbc
--- /dev/null
+++ b/tags_django__django-13265.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d69c7bb90c9b5a9b4812426b1b53cba2ae79c13b96dde5d02416ad2a961ed402
+size 40139138
diff --git a/tags_django__django-13315.json b/tags_django__django-13315.json
new file mode 100644
index 0000000000000000000000000000000000000000..18522b13db02010b298852746b741c086e7fff6c
--- /dev/null
+++ b/tags_django__django-13315.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3ac49d8f60f7882cb01a0a49a1f2890a9a4d8f1c30ff5a51e86b19078a0131
+size 40585568
diff --git a/tags_django__django-13321.json b/tags_django__django-13321.json
new file mode 100644
index 0000000000000000000000000000000000000000..baf7713170294e09b404513f40c5dbefbba0306f
--- /dev/null
+++ b/tags_django__django-13321.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:15409425509632b68a41708c12b6fea29c67f86af30c59c7916be71037211f9f
+size 40269287
diff --git a/tags_django__django-13401.json b/tags_django__django-13401.json
new file mode 100644
index 0000000000000000000000000000000000000000..e31d33ac0b533b1725474c65d346accdbac72081
--- /dev/null
+++ b/tags_django__django-13401.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e8e3df4b0167ffae968e16dd8234dde6be4aecbba4f09965ff2c76cfee7d0d69
+size 40225775
diff --git a/tags_django__django-13447.json b/tags_django__django-13447.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc10dfb2af169dc3be94af88f64778e3c4fb3c4d
--- /dev/null
+++ b/tags_django__django-13447.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bed14aee7aa66aaf06b5afd0756e1991a78bfa2c4c859e678893cd8b9260a8c1
+size 41181177
diff --git a/tags_django__django-13448.json b/tags_django__django-13448.json
new file mode 100644
index 0000000000000000000000000000000000000000..f569bdc9b281550bf01bc70a07acdb0c428e14f2
--- /dev/null
+++ b/tags_django__django-13448.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:114594863f7b277e4654ea99a3047c0f372ccdd7358b158ebbb465c53c763bc7
+size 40453328
diff --git a/tags_django__django-13551.json b/tags_django__django-13551.json
new file mode 100644
index 0000000000000000000000000000000000000000..4d4d5eb837e055bd5eccbd92d5a6ff43f265c2b8
--- /dev/null
+++ b/tags_django__django-13551.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bddef8b2411b98a2165af7273d8bfbe96ea296b8f65d1d70cd9edfb49971c03
+size 40491192
diff --git a/tags_django__django-13590.json b/tags_django__django-13590.json
new file mode 100644
index 0000000000000000000000000000000000000000..ff86b4efed9335e51d77095fbda845dcf79a5436
--- /dev/null
+++ b/tags_django__django-13590.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0131d5ee0e38646db5a67b238aeea92fa22900bffdd4e89ea66fc91b2cd558e
+size 40516601
diff --git a/tags_django__django-13658.json b/tags_django__django-13658.json
new file mode 100644
index 0000000000000000000000000000000000000000..fe19ee84e2c71ba772670be3eeb7400729333c55
--- /dev/null
+++ b/tags_django__django-13658.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aae86db3972911dc6f39a9514f837672f3375f4c7567aa93e2d962a3f2346d7d
+size 40687480
diff --git a/tags_django__django-13660.json b/tags_django__django-13660.json
new file mode 100644
index 0000000000000000000000000000000000000000..f6430ca1c74da6e3804f68fce9cf515f392f3e36
--- /dev/null
+++ b/tags_django__django-13660.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:175eb3744d535bc1891c81af56b615f2239e624559d2b238e92c81b6a74c6a05
+size 40690315
diff --git a/tags_django__django-13710.json b/tags_django__django-13710.json
new file mode 100644
index 0000000000000000000000000000000000000000..de48fcfc653de8e098c2dd89251a85b074092492
--- /dev/null
+++ b/tags_django__django-13710.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1171c497e90ee0377f8387cd0b0e21b8d0c33391e4d3581d70478411b07225af
+size 41992870
diff --git a/tags_django__django-13757.json b/tags_django__django-13757.json
new file mode 100644
index 0000000000000000000000000000000000000000..6a14524cd9171d1615330526c1da96b04d83d68a
--- /dev/null
+++ b/tags_django__django-13757.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74a3cca7910c9043ffa90abe6d5975e9bd6ce12e34792648b95b1b17ccd0e593
+size 40772621
diff --git a/tags_django__django-13768.json b/tags_django__django-13768.json
new file mode 100644
index 0000000000000000000000000000000000000000..300593b5b20062072a7ecfbbe72d2920b2ca47ac
--- /dev/null
+++ b/tags_django__django-13768.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e599a2053a858e5dcae4e5cd65bedb9d5951c60fc1eaad42da97f6cd4cf1eb3
+size 40773028
diff --git a/tags_django__django-13925.json b/tags_django__django-13925.json
new file mode 100644
index 0000000000000000000000000000000000000000..5da8502a92a79c0c093a5270e7d3daf6da8a11ef
--- /dev/null
+++ b/tags_django__django-13925.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:44a83bd990c8d376f598c6bedfa387059f6c05ad26f25a24a893f268a1a61dbe
+size 40876686
diff --git a/tags_django__django-13933.json b/tags_django__django-13933.json
new file mode 100644
index 0000000000000000000000000000000000000000..34e82261b3871c7ad9b560b7659d4239dd86bc70
--- /dev/null
+++ b/tags_django__django-13933.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99c2100dfd27fd4b025cbca9267d0d5e33e2d277bc3e95d69f6fbd14442e13bc
+size 40889015
diff --git a/tags_django__django-13964.json b/tags_django__django-13964.json
new file mode 100644
index 0000000000000000000000000000000000000000..110c7a4b3ebdd3fd22a5d0879c8ac02d6722668d
--- /dev/null
+++ b/tags_django__django-13964.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c485ed82e0584a9f9e7eeae64fe6620d1f0bbc54a2d18fd6be69a47291186762
+size 40884814
diff --git a/tags_django__django-14016.json b/tags_django__django-14016.json
new file mode 100644
index 0000000000000000000000000000000000000000..e48cc13b5b658b7d4b9f911a6130d2debbc5197f
--- /dev/null
+++ b/tags_django__django-14016.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0c421f3d2755bc110ab302dcde889e29a2448cbeab6af03130c1f2328208aa1
+size 40905453
diff --git a/tags_django__django-14017.json b/tags_django__django-14017.json
new file mode 100644
index 0000000000000000000000000000000000000000..c08b0a6a42ffe5cfc4e6a5c5f99d1ff61f67c2bc
--- /dev/null
+++ b/tags_django__django-14017.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:973fa858f79ccd66f318533162caec8c21621ba345819915b0e6752c68524b2c
+size 40906141
diff --git a/tags_django__django-14155.json b/tags_django__django-14155.json
new file mode 100644
index 0000000000000000000000000000000000000000..5a345accc5f318e6d284dc8105b661584ed6ce04
--- /dev/null
+++ b/tags_django__django-14155.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:287b3add233b0248a87edae5f6258e4ff461dc3117676fefb3d9809c2e87e363
+size 41165844
diff --git a/tags_django__django-14238.json b/tags_django__django-14238.json
new file mode 100644
index 0000000000000000000000000000000000000000..69c88b73674efd5f434b2cbd67a263f48db07dda
--- /dev/null
+++ b/tags_django__django-14238.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f25863cdbfbcac29f83eed27e14e1812d6216d6398a6510336e94af5fe5bed3
+size 41237292
diff --git a/tags_django__django-14382.json b/tags_django__django-14382.json
new file mode 100644
index 0000000000000000000000000000000000000000..73bf7493f5242575a62e2286cf70a546e2a90ea1
--- /dev/null
+++ b/tags_django__django-14382.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:547e9806bc0451ae2583e21f94d8b25a0516eec2e4199bd40ba51067fdc77da7
+size 41183461
diff --git a/tags_django__django-14411.json b/tags_django__django-14411.json
new file mode 100644
index 0000000000000000000000000000000000000000..2a4bb2eed5d643754eadf2e3bbbc9c0799093256
--- /dev/null
+++ b/tags_django__django-14411.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3f1882a0b31a6a638818bb056a8850cf682be2fc3ae8527711f8c4f9db271d1
+size 41234050
diff --git a/tags_django__django-14534.json b/tags_django__django-14534.json
new file mode 100644
index 0000000000000000000000000000000000000000..f94d29fbf53e4ff79fbc71ceae314079bc175e7e
--- /dev/null
+++ b/tags_django__django-14534.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d12747cbd5d124cbb90872274c7e6d4642360e4d14c2c443d358da3c8465c433
+size 41775546
diff --git a/tags_django__django-14580.json b/tags_django__django-14580.json
new file mode 100644
index 0000000000000000000000000000000000000000..90ef4a9f17245fb0e94e9f7e3e4adcefde30f167
--- /dev/null
+++ b/tags_django__django-14580.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e4ba33daeb0026f1bd214e007291157d3a6cf27ef7ab892bde4808ad2e5c4ed
+size 41549104
diff --git a/tags_django__django-14608.json b/tags_django__django-14608.json
new file mode 100644
index 0000000000000000000000000000000000000000..ea210cb63e072245dbfc6b252d4aa1c71e985656
--- /dev/null
+++ b/tags_django__django-14608.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f165392363c41567acdaed8d0a332313bc8c3a46a2ad9df7532e9b682f62cdbc
+size 41624196
diff --git a/tags_django__django-14667.json b/tags_django__django-14667.json
new file mode 100644
index 0000000000000000000000000000000000000000..507ab9a3a3d0e460093d0019213c192a28c9e6c5
--- /dev/null
+++ b/tags_django__django-14667.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e590ce46cff5f31ea5aa5020b6c7c49ac4504d1ed3bac34adf73208682d5dab6
+size 41619723
diff --git a/tags_django__django-14672.json b/tags_django__django-14672.json
new file mode 100644
index 0000000000000000000000000000000000000000..0312057dea0465fa0eac1fa74b7a43d117a5ece6
--- /dev/null
+++ b/tags_django__django-14672.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5eeb07248c66740157455f377ed18433ca7c7639e3db2643a454ae6b1a069baa
+size 41716033
diff --git a/tags_django__django-14730.json b/tags_django__django-14730.json
new file mode 100644
index 0000000000000000000000000000000000000000..405ce56b9da63ebc88bdad8745b8871cfd1904ba
--- /dev/null
+++ b/tags_django__django-14730.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99cac4d855f34bfd65009c923306a454bb9925d45882cb4464234898efa49053
+size 41784794
diff --git a/tags_django__django-14752.json b/tags_django__django-14752.json
new file mode 100644
index 0000000000000000000000000000000000000000..a65fc0df27e0227c9698328bd1fb060e81ef7b83
--- /dev/null
+++ b/tags_django__django-14752.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6865978a27fc4a8bad97e7482ae4bb2cdd842b55bdfdcd4f95ba607cabc78b7e
+size 41781004
diff --git a/tags_django__django-14787.json b/tags_django__django-14787.json
new file mode 100644
index 0000000000000000000000000000000000000000..3429646ead3f5ee355051a663be1d2feca4afc17
--- /dev/null
+++ b/tags_django__django-14787.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81fe9722ac65b2ca9b1c0753f4fc864eba934d025a137cbe97ad47b809c8e5a6
+size 42131976
diff --git a/tags_django__django-14855.json b/tags_django__django-14855.json
new file mode 100644
index 0000000000000000000000000000000000000000..37790d891e56c90a704da042cf5ad85e4df64c62
--- /dev/null
+++ b/tags_django__django-14855.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72047502d1f4ada60bed12b0f5469383a2194c0ad0ca21cf7f56328280c172ba
+size 42063147
diff --git a/tags_django__django-14915.json b/tags_django__django-14915.json
new file mode 100644
index 0000000000000000000000000000000000000000..81070c5301f76f2db0482ddaec5ed4aa91e33252
--- /dev/null
+++ b/tags_django__django-14915.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f25e9c0b7c8f5b2458df87413bdfeee4f256d68b472d4800f29707d0c38ef4fc
+size 42051167
diff --git a/tags_django__django-14997.json b/tags_django__django-14997.json
new file mode 100644
index 0000000000000000000000000000000000000000..df3de18922699e80b7c83243cf56bb0b1fe868b0
--- /dev/null
+++ b/tags_django__django-14997.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2813c87ae576d8fc867bf4a1d31fd0549f8f4e7c020856b3023102f71132c069
+size 42106100
diff --git a/tags_django__django-14999.json b/tags_django__django-14999.json
new file mode 100644
index 0000000000000000000000000000000000000000..486a440777a85e1a67b1ad7ed036f1550db5f7fb
--- /dev/null
+++ b/tags_django__django-14999.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56fe6226701804a93d6ffef8a670dcec5e4ff093ce60de85d55dfd0bb3fe04de
+size 42257359
diff --git a/tags_django__django-15061.json b/tags_django__django-15061.json
new file mode 100644
index 0000000000000000000000000000000000000000..fae82145c6044c1a13ed4630551f4ac43c155fdd
--- /dev/null
+++ b/tags_django__django-15061.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a20bf01f9c97a8f815d1d70f93198f1dd191bbd3e9aba726ba741bdf9c9fb809
+size 42310517
diff --git a/tags_django__django-15202.json b/tags_django__django-15202.json
new file mode 100644
index 0000000000000000000000000000000000000000..0fc2104f09f7603807feb68a96096ed1754e0e41
--- /dev/null
+++ b/tags_django__django-15202.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6954ec0c4e4824c26d35c3a40089b481d5e95f4c1e5964b6f4189816d85eba6
+size 42410362
diff --git a/tags_django__django-15213.json b/tags_django__django-15213.json
new file mode 100644
index 0000000000000000000000000000000000000000..66d12f7521275c53ebbbb5db6d8e3bb5b6a08cc8
--- /dev/null
+++ b/tags_django__django-15213.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11ada5835e296474270e327f7311da1cebb19bfa9d8c12ceb404cdd54a87b817
+size 42410351
diff --git a/tags_django__django-15252.json b/tags_django__django-15252.json
new file mode 100644
index 0000000000000000000000000000000000000000..7bf25895d885f78d8dd5c763fcc48079e14d9fb4
--- /dev/null
+++ b/tags_django__django-15252.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef2ecdb2fe8a4f0f18dc63431477465c344bf4c42e573cd71874eb1f93f59f23
+size 42441177
diff --git a/tags_django__django-15320.json b/tags_django__django-15320.json
new file mode 100644
index 0000000000000000000000000000000000000000..27b15c2fe34b09d765b48ebf960890be681ccc15
--- /dev/null
+++ b/tags_django__django-15320.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:105d583033a59c88947156e1459b2d87d6eb3731c0ac5d675aea0db75154fd23
+size 42468529
diff --git a/tags_django__django-15347.json b/tags_django__django-15347.json
new file mode 100644
index 0000000000000000000000000000000000000000..4efdc6d7d1d9b4400186d27d38e69c3f91840e3a
--- /dev/null
+++ b/tags_django__django-15347.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8270468748711fdb8239195b76219dc9999a8fb4d722ad132d8160e8cbc4e4be
+size 42692727
diff --git a/tags_django__django-15388.json b/tags_django__django-15388.json
new file mode 100644
index 0000000000000000000000000000000000000000..7dcee1af4b7bd90e36fcb5c0ad624ef6f308b0d6
--- /dev/null
+++ b/tags_django__django-15388.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2326f7df1113d992fd4a42fde518a8192b3f89ba7f005f5b351d7af231c0bc1c
+size 42727084
diff --git a/tags_django__django-15400.json b/tags_django__django-15400.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d841c255815b1e92f0f1e2118fa281c2827d8c6
--- /dev/null
+++ b/tags_django__django-15400.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:232175c4e9e75075107f7184ee24cdd542097ffd3953d6092499786eedf4f2a9
+size 44143927
diff --git a/tags_django__django-15498.json b/tags_django__django-15498.json
new file mode 100644
index 0000000000000000000000000000000000000000..922fd247e6ea0d093d0b2a760fcb983425f79057
--- /dev/null
+++ b/tags_django__django-15498.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22be7a710f3bbd14c1f5ce7ffe35a9947740ac80b049aee4593e477e9eb0bf04
+size 44355976
diff --git a/tags_django__django-15695.json b/tags_django__django-15695.json
new file mode 100644
index 0000000000000000000000000000000000000000..1e764bdcfc6036a5de37af8ff9ffed1067a0f4f8
--- /dev/null
+++ b/tags_django__django-15695.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df8997bff3c037b58f173dfead7368c3914110a884d52bc88faf3e7f15b85ee5
+size 46062758
diff --git a/tags_django__django-15738.json b/tags_django__django-15738.json
new file mode 100644
index 0000000000000000000000000000000000000000..5b3e4a4bfbc809ac94d269aadd3b7afb232affb5
--- /dev/null
+++ b/tags_django__django-15738.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38804cd41675d0c6c201adb2b7a4d140e0f89c3d55a281c146f1cf6d14734132
+size 46262490
diff --git a/tags_django__django-15781.json b/tags_django__django-15781.json
new file mode 100644
index 0000000000000000000000000000000000000000..49bff5e4b207677efd0da2529f20ab69c0d88eef
--- /dev/null
+++ b/tags_django__django-15781.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35d195d7d6440a478f77945c24c0d1909eb067f483700ad12eacbe070153f3c6
+size 46408412
diff --git a/tags_django__django-15789.json b/tags_django__django-15789.json
new file mode 100644
index 0000000000000000000000000000000000000000..6cbc72ab72abe1a9e3108f73d4d27bd83e0305b7
--- /dev/null
+++ b/tags_django__django-15789.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:27df0e0fb7d47c25ddece9c96ce0da4b4655953463c9509a890f811f225fbe66
+size 46416691
diff --git a/tags_django__django-15790.json b/tags_django__django-15790.json
new file mode 100644
index 0000000000000000000000000000000000000000..b39569efb6f593df0c1a45538adcf73a875c6dc5
--- /dev/null
+++ b/tags_django__django-15790.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad07afac43eaf60d74cc3aa0dce05d61a7670f024aeda05ed85b61ef9137c604
+size 46410316
diff --git a/tags_django__django-15814.json b/tags_django__django-15814.json
new file mode 100644
index 0000000000000000000000000000000000000000..02b5cdf4c06fc47d6943328b6b86a5a621b2dbda
--- /dev/null
+++ b/tags_django__django-15814.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:26447d7e990c2bf99b75dd9c6b2e34f88deb83c4a543744b0942d39c0a7863fd
+size 46417766
diff --git a/tags_django__django-15819.json b/tags_django__django-15819.json
new file mode 100644
index 0000000000000000000000000000000000000000..6d3699cf554da11012eefbce2f7bf56120a0d58e
--- /dev/null
+++ b/tags_django__django-15819.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:567f2f7d189361d6790a1bc0425d3b91b52e4737d8b478763bffbed39a00c638
+size 46423980
diff --git a/tags_django__django-15851.json b/tags_django__django-15851.json
new file mode 100644
index 0000000000000000000000000000000000000000..439306dbf87c8bf2f67aaaa532a6ea34670320e5
--- /dev/null
+++ b/tags_django__django-15851.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f9c2a21d0b2fda2d198ca50aa3ca01c5daf10f1ad014176872fea3dc096c10bf
+size 46370115
diff --git a/tags_django__django-15902.json b/tags_django__django-15902.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ac51dda96827ab4bcd10272f8123144e0aad883
--- /dev/null
+++ b/tags_django__django-15902.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:650ca6f51dda487fd740c6208741a44f87084583c615e14b3d724acb09dc895f
+size 46424864
diff --git a/tags_django__django-15996.json b/tags_django__django-15996.json
new file mode 100644
index 0000000000000000000000000000000000000000..add5a07e4163095af640327b3aa1d4bb85342aab
--- /dev/null
+++ b/tags_django__django-15996.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de9ad08af4c83e30b93456f40bae7e35ea2fd8c095b1a8ffdd1660d2280f2b66
+size 46519366
diff --git a/tags_django__django-16041.json b/tags_django__django-16041.json
new file mode 100644
index 0000000000000000000000000000000000000000..4c23b84831b9b7f0219de7bc7c961e5f8d900a24
--- /dev/null
+++ b/tags_django__django-16041.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:511e0e26cc67173ac8ae7c1901f6947e81e521898ad5031fbfa19cf6a389d194
+size 46647041
diff --git a/tags_django__django-16046.json b/tags_django__django-16046.json
new file mode 100644
index 0000000000000000000000000000000000000000..bfa264b8b27b9aba23853ddb120a14b24a18f9e9
--- /dev/null
+++ b/tags_django__django-16046.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13dc2af5a7bd31ad657b4659357ce30eaaf38b225e9662192bd433d192cde56c
+size 46651380
diff --git a/tags_django__django-16139.json b/tags_django__django-16139.json
new file mode 100644
index 0000000000000000000000000000000000000000..295a6949c8d99c8ffad7721a3733a613b0b4a5ce
--- /dev/null
+++ b/tags_django__django-16139.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fd55ee0481437e031f29f715d9a8b8293a8278dcc4dc90a36b315c934cb66b9
+size 47007270
diff --git a/tags_django__django-16229.json b/tags_django__django-16229.json
new file mode 100644
index 0000000000000000000000000000000000000000..ddaad73f9c2d130275b03c50ef2f584f96b6d2b9
--- /dev/null
+++ b/tags_django__django-16229.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:868b43042fa0125cc83cda2cbd0ace1f5f51b42ff514988836e43b5150999982
+size 47277954
diff --git a/tags_django__django-16255.json b/tags_django__django-16255.json
new file mode 100644
index 0000000000000000000000000000000000000000..5854f4079553f44bdcfb9105597f677131d55e7b
--- /dev/null
+++ b/tags_django__django-16255.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3735f8df5325718805dbe5f01487a79c81f9fa2c9452e6a52c2e43cdaffba41
+size 47228436
diff --git a/tags_django__django-16379.json b/tags_django__django-16379.json
new file mode 100644
index 0000000000000000000000000000000000000000..ad80fbf8abc0f89bebcf912f6fe65976048129c6
--- /dev/null
+++ b/tags_django__django-16379.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1558c75cb4ea86af9ded9ca7786264ec02afe24caf456240155213151402c841
+size 47275387
diff --git a/tags_django__django-16400.json b/tags_django__django-16400.json
new file mode 100644
index 0000000000000000000000000000000000000000..accbc9f6859a58fecc04768e1e8668882bd4f39c
--- /dev/null
+++ b/tags_django__django-16400.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b28aa98e5ffa5d9c674df7bcdcabe5bb14ff29a89840a1a036cb45e3d35b7b
+size 47393966
diff --git a/tags_django__django-16408.json b/tags_django__django-16408.json
new file mode 100644
index 0000000000000000000000000000000000000000..15f09c093ebdf570559c36ece04b98092b89caf9
--- /dev/null
+++ b/tags_django__django-16408.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e19370b8135e42c39ccd9e95f438823bdedd8dc8d4db1a40c99920d6104cd9fe
+size 47721666
diff --git a/tags_django__django-16527.json b/tags_django__django-16527.json
new file mode 100644
index 0000000000000000000000000000000000000000..ff1f596dff4a43de3db95d06a9587c1fe55497a6
--- /dev/null
+++ b/tags_django__django-16527.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d7809cb4ee8f27c7c5306f711a89b54bf908552181740dba9d449c0a867cc61
+size 47751599
diff --git a/tags_django__django-16595.json b/tags_django__django-16595.json
new file mode 100644
index 0000000000000000000000000000000000000000..16e768415ec9e01221cbc919f4f5d09567962587
--- /dev/null
+++ b/tags_django__django-16595.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:298ac94eef5a61cd27303cb906a30310d051428530d2c62715969af1d64f6443
+size 47835353
diff --git a/tags_django__django-16816.json b/tags_django__django-16816.json
new file mode 100644
index 0000000000000000000000000000000000000000..12c2ea8184950f23f9f1779d05c5b7ca4fbba411
--- /dev/null
+++ b/tags_django__django-16816.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bfa8ecf105f7c941d9905539064cf732737fe950133b847f5716768140eb69d
+size 48204218
diff --git a/tags_django__django-16820.json b/tags_django__django-16820.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a4f5080571b818bcf00f67f5a48d597cff243c8
--- /dev/null
+++ b/tags_django__django-16820.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d2be979db155ef58957b71027be81266b7f5c6dcd866c46ceb3ac8a7b4fcc7f
+size 48200359
diff --git a/tags_django__django-16873.json b/tags_django__django-16873.json
new file mode 100644
index 0000000000000000000000000000000000000000..788ef71355780b2bb60318bb43fa9df27add2578
--- /dev/null
+++ b/tags_django__django-16873.json
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e17f36d071044aa0c780da1d1441f4ef787e589a63be0c081a2db6bb947b0e03
+size 48759953
diff --git a/tags_django__django-16910.json b/tags_django__django-16910.json
new file mode 100644
index
0000000000000000000000000000000000000000..0dee74e86767495d73262c4191c56421fad32373 --- /dev/null +++ b/tags_django__django-16910.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e87319c73ff9723903127075789c6d09a89eab4c7f5c0f31dfde1e50311f759 +size 48768493 diff --git a/tags_django__django-17051.json b/tags_django__django-17051.json new file mode 100644 index 0000000000000000000000000000000000000000..52ed8c51f53466496d9f12fc4420b9ba13f0b496 --- /dev/null +++ b/tags_django__django-17051.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea867c62006bb147b933835400f52d060459f4d644e202792185a19031a2223f +size 48943710 diff --git a/tags_django__django-17087.json b/tags_django__django-17087.json new file mode 100644 index 0000000000000000000000000000000000000000..57714261a2d139d5ef9cbe36953dc6f2fc06ad24 --- /dev/null +++ b/tags_django__django-17087.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f2f54bc1e4c2256ba2592d3c5454fc2c32b4b877355f2d4b003b60bf8ba3f92 +size 48960907 diff --git a/tags_matplotlib__matplotlib-18869.json b/tags_matplotlib__matplotlib-18869.json new file mode 100644 index 0000000000000000000000000000000000000000..82f31086944a14b0717a2bc440f511155f324c29 --- /dev/null +++ b/tags_matplotlib__matplotlib-18869.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7c830a072c7ffe071cb4bdc52bb28d78d225afeb87c65191afa7cc0eff21fb4 +size 56767346 diff --git a/tags_matplotlib__matplotlib-22711.json b/tags_matplotlib__matplotlib-22711.json new file mode 100644 index 0000000000000000000000000000000000000000..c4dda8af99ae9e872bc9c102abfeb534a2a42469 --- /dev/null +++ b/tags_matplotlib__matplotlib-22711.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdb6d411bee120fd6e32c3729f8840798aa38c1594de38f16f543b9f2925a8cc +size 56979886 diff --git a/tags_matplotlib__matplotlib-22835.json b/tags_matplotlib__matplotlib-22835.json new file mode 100644 index 0000000000000000000000000000000000000000..a0259cbad4b63e2048718bf6b600866384a63c3e --- /dev/null +++ b/tags_matplotlib__matplotlib-22835.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8193d2ce395e199ac844cb65c7db36ed24949038900cf3b4e088f88d5bf597bb +size 56083980 diff --git a/tags_matplotlib__matplotlib-23299.json b/tags_matplotlib__matplotlib-23299.json new file mode 100644 index 0000000000000000000000000000000000000000..1ea978677fa56e2e6ba1583cde7793e29c032946 --- /dev/null +++ b/tags_matplotlib__matplotlib-23299.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95dc120aa87e71a41e20e2d3c767b235c1e3e20f7de900d18da1dd3136266b3e +size 55899310 diff --git a/tags_matplotlib__matplotlib-23314.json b/tags_matplotlib__matplotlib-23314.json new file mode 100644 index 0000000000000000000000000000000000000000..57b6275c1c663fc5fc560e165b522e3347b8338c --- /dev/null +++ b/tags_matplotlib__matplotlib-23314.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecf3be94d22c32079c18a6d638d982c9ac828ead19a93335f1e7a5ea9768f39f +size 55769782 diff --git a/tags_matplotlib__matplotlib-23476.json b/tags_matplotlib__matplotlib-23476.json new file mode 100644 index 0000000000000000000000000000000000000000..bdf78aefc78e45cb6353c6c97f4f2723b773de89 --- /dev/null +++ b/tags_matplotlib__matplotlib-23476.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7c24bdc66947b7953a7ef4399d6d5f524c6583c6bb11e52f9d7c2c98cbd8399 +size 56026529 diff 
--git a/tags_matplotlib__matplotlib-23562.json b/tags_matplotlib__matplotlib-23562.json new file mode 100644 index 0000000000000000000000000000000000000000..44ffb928c4d3244c491c828397cfd02d35e43ddc --- /dev/null +++ b/tags_matplotlib__matplotlib-23562.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c7b28380696ac1d7971e76cb0f441ad785e314eb1054fc19b110116f3a0718 +size 55587278 diff --git a/tags_matplotlib__matplotlib-23563.json b/tags_matplotlib__matplotlib-23563.json new file mode 100644 index 0000000000000000000000000000000000000000..479a5a5bbd64045c0307413577d7221001fcadfe --- /dev/null +++ b/tags_matplotlib__matplotlib-23563.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8eeea5736f02bc2c0fdc145aafefdf408b7c630b5a1afd8109a680fec0069ec1 +size 55442945 diff --git a/tags_matplotlib__matplotlib-23913.json b/tags_matplotlib__matplotlib-23913.json new file mode 100644 index 0000000000000000000000000000000000000000..85f5547745c07c73b01040ea778dbc2a5a7d5718 --- /dev/null +++ b/tags_matplotlib__matplotlib-23913.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d17c40bdbe0557f3a3650b1de1e52ce8297d40437764d2ed2475c3f028c8bdb +size 55957278 diff --git a/tags_matplotlib__matplotlib-23964.json b/tags_matplotlib__matplotlib-23964.json new file mode 100644 index 0000000000000000000000000000000000000000..84de044bf092b41735668fa9f8b0c1004218bb71 --- /dev/null +++ b/tags_matplotlib__matplotlib-23964.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3ad72cc04f962e31fd78c88b4dbc6e9a93e6226804c5b71c7f1235640e2088e +size 55987356 diff --git a/tags_matplotlib__matplotlib-23987.json b/tags_matplotlib__matplotlib-23987.json new file mode 100644 index 0000000000000000000000000000000000000000..83fe03ea03413cb22278450bfab99e61743149a1 --- /dev/null +++ b/tags_matplotlib__matplotlib-23987.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d751dea89b693198ebf146705eddf75b72a59b8d9b2c24e6250b415a8231c64b +size 55986887 diff --git a/tags_matplotlib__matplotlib-24149.json b/tags_matplotlib__matplotlib-24149.json new file mode 100644 index 0000000000000000000000000000000000000000..b6d46aaec81b55f6fee6343cca5b67cd661fea4c --- /dev/null +++ b/tags_matplotlib__matplotlib-24149.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:506a4ec652ff79aaaf8cb45247183a8f36debb2ee49bad3405f855c591206f9b +size 56586168 diff --git a/tags_matplotlib__matplotlib-24265.json b/tags_matplotlib__matplotlib-24265.json new file mode 100644 index 0000000000000000000000000000000000000000..f3bff8ad1afea3d9709f9a95f40667fb6329b8fe --- /dev/null +++ b/tags_matplotlib__matplotlib-24265.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed38e8ae76efe7f067f6597bc752472370ec7988be4ff645a27128bc483d0a79 +size 57769411 diff --git a/tags_matplotlib__matplotlib-24334.json b/tags_matplotlib__matplotlib-24334.json new file mode 100644 index 0000000000000000000000000000000000000000..cf4c0a553f05424d068a75045379c281b161c437 --- /dev/null +++ b/tags_matplotlib__matplotlib-24334.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17c6ca23c79afdf1828122e25b3434d3b79ea0fbe9deba3a0a244e5a3ed198c5 +size 57201061 diff --git a/tags_matplotlib__matplotlib-24970.json b/tags_matplotlib__matplotlib-24970.json new file mode 100644 index 0000000000000000000000000000000000000000..4bcd6e2c7176d2786efbb29e4801ce7223a40115 --- /dev/null +++ 
b/tags_matplotlib__matplotlib-24970.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4150c3329aef4320933e55a2b2ffd2211296c102a7913c8a54a3edf0e724c5af +size 57802990 diff --git a/tags_matplotlib__matplotlib-25079.json b/tags_matplotlib__matplotlib-25079.json new file mode 100644 index 0000000000000000000000000000000000000000..a0d0a29167f3fd551988cc8e92361a20822eedb5 --- /dev/null +++ b/tags_matplotlib__matplotlib-25079.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffc82579d2b601bb519b21f326043869253ab7436d20866472a14743b24ee3c1 +size 57896896 diff --git a/tags_matplotlib__matplotlib-25311.json b/tags_matplotlib__matplotlib-25311.json new file mode 100644 index 0000000000000000000000000000000000000000..4cc2154751b55282b00a68dcde9bf19fc6865b92 --- /dev/null +++ b/tags_matplotlib__matplotlib-25311.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:359453ffb978e28c4daecf132121aa955b0efc148afd7c2e86b6c20b0f3bc1bb +size 58256941 diff --git a/tags_matplotlib__matplotlib-25332.json b/tags_matplotlib__matplotlib-25332.json new file mode 100644 index 0000000000000000000000000000000000000000..eb0d7dac968772b5b17d030ff5f23ab0582d0b92 --- /dev/null +++ b/tags_matplotlib__matplotlib-25332.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16979cf3e8822c00d4079c81a5ca935311b8a85d144a83c71a7f4182de00f8e1 +size 58261441 diff --git a/tags_matplotlib__matplotlib-25433.json b/tags_matplotlib__matplotlib-25433.json new file mode 100644 index 0000000000000000000000000000000000000000..463b111ea1384186c0e6ffae5ca681f354ac66e3 --- /dev/null +++ b/tags_matplotlib__matplotlib-25433.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5aee4269dacbecbd209268b907a7814b485f5889daec712e0d89b102ea74b029 +size 57861691 diff --git a/tags_matplotlib__matplotlib-25442.json b/tags_matplotlib__matplotlib-25442.json new file mode 100644 index 0000000000000000000000000000000000000000..f1bade91cac79a044f54dcba83d94df2d03a3e2d --- /dev/null +++ b/tags_matplotlib__matplotlib-25442.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30b51ba3959ac875f0514fedb36c2102bef37889c202595f049ebe46d8238eb6 +size 57861288 diff --git a/tags_matplotlib__matplotlib-25498.json b/tags_matplotlib__matplotlib-25498.json new file mode 100644 index 0000000000000000000000000000000000000000..a64240a92b3924a3144b67c0d1bd281730d09aac --- /dev/null +++ b/tags_matplotlib__matplotlib-25498.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b72937a9a8a5b442ed17ae8e3dd8e5f77272ac8fb40129a87fbbdf4e220921d +size 57885659 diff --git a/tags_matplotlib__matplotlib-26011.json b/tags_matplotlib__matplotlib-26011.json new file mode 100644 index 0000000000000000000000000000000000000000..263356a1f79c627f676eb80c722b88a5a444ae43 --- /dev/null +++ b/tags_matplotlib__matplotlib-26011.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:597d55fb24825d260458b2c1057fb841cad3ca77490984995638025e846b5c1d +size 58648096 diff --git a/tags_matplotlib__matplotlib-26020.json b/tags_matplotlib__matplotlib-26020.json new file mode 100644 index 0000000000000000000000000000000000000000..659239139ef33ce7f2b301152bdb9fc2a8a11f44 --- /dev/null +++ b/tags_matplotlib__matplotlib-26020.json @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5cb6161443272b1dda814cbe1c82a2eb2172ab8ff91006cbe57cf64fb95c37be +size 57620472 diff --git a/tags_mwaskom__seaborn-2848.json 
b/tags_mwaskom__seaborn-2848.json new file mode 100644 index 0000000000000000000000000000000000000000..5eef9a17cd8b861d19868092ec2f8bac85b9d68f --- /dev/null +++ b/tags_mwaskom__seaborn-2848.json @@ -0,0 +1 @@ +[{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/ci/check_gallery.py", "rel_fname": "ci/check_gallery.py", "line": 12, "name": "read", "kind": "ref", "category": "function", "info": " exec(fid.read())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 27, "name": "abspath", "kind": "ref", "category": "function", "info": "sys.path.insert(0, os.path.abspath('sphinxext'))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 76, "name": "abspath", "kind": "ref", "category": "function", "info": "sys.path.insert(0, os.path.abspath(os.path.pardir))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 143, "name": "get_html_theme_path", "kind": "ref", "category": "function", "info": "html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 287, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.add_js_file('copybutton.js')\n app.add_css_file('style.css')\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 288, "name": "add_js_file", "kind": "ref", "category": "function", "info": " app.add_js_file('copybutton.js')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 289, "name": "add_css_file", "kind": "ref", "category": "function", "info": " app.add_css_file('style.css')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 40, "name": "MetadataError", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 44, "name": "pop_recursive", "kind": "def", "category": "function", "info": "def pop_recursive(d, key, default=None):\n \"\"\"dict.pop(key) where `key` is a `.`-delimited list of nested keys.\n >>> d = {'a': {'b': 1, 'c': 2}}\n >>> pop_recursive(d, 'a.c')\n 2\n >>> d\n {'a': {'b': 1}}\n \"\"\"\n nested = key.split('.')\n current = d\n for k in nested[:-1]:\n if hasattr(current, 'get'):\n current = current.get(k, {})\n else:\n return default\n if not hasattr(current, 'pop'):\n return default\n return current.pop(nested[-1], default)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 64, "name": "strip_output", "kind": "def", "category": "function", "info": "def strip_output(nb):\n \"\"\"\n Strip the outputs, execution count/prompt number and miscellaneous\n metadata from a notebook object, unless specified to keep either the\n outputs or counts.\n \"\"\"\n keys = {'metadata': [], 'cell': {'metadata': [\"execution\"]}}\n\n nb.metadata.pop('signature', None)\n nb.metadata.pop('widgets', None)\n\n for field in keys['metadata']:\n pop_recursive(nb.metadata, field)\n\n for cell in nb.cells:\n\n # Remove the outputs, unless directed otherwise\n 
if 'outputs' in cell:\n\n cell['outputs'] = []\n\n # Remove the prompt_number/execution_count, unless directed otherwise\n if 'prompt_number' in cell:\n cell['prompt_number'] = None\n if 'execution_count' in cell:\n cell['execution_count'] = None\n\n # Always remove this metadata\n for output_style in ['collapsed', 'scrolled']:\n if output_style in cell.metadata:\n cell.metadata[output_style] = False\n if 'metadata' in cell:\n for field in ['collapsed', 'scrolled', 'ExecuteTime']:\n cell.metadata.pop(field, None)\n for (extra, fields) in keys['cell'].items():\n if extra in cell:\n for field in fields:\n pop_recursive(getattr(cell, extra), field)\n return nb\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 76, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(nb.metadata, field)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 101, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(getattr(cell, extra), field)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 121, "name": "ExecutePreprocessor", "kind": "ref", "category": "function", "info": " ep = ExecutePreprocessor(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 125, "name": "preprocess", "kind": "ref", "category": "function", "info": " ep.preprocess(nb, {\"metadata\": {\"path\": basedir}})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 142, "name": "RSTExporter", "kind": "ref", "category": "function", "info": " exp = RSTExporter()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 151, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 151, "name": "TagRemovePreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 152, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 152, "name": "ExtractOutputPreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 154, "name": "from_notebook_node", "kind": "ref", "category": "function", "info": " body, resources = exp.from_notebook_node(nb)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 158, "name": "strip_output", "kind": "ref", "category": "function", "info": " nb = strip_output(nb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/nextgen/nb_to_doc.py", "rel_fname": "doc/nextgen/nb_to_doc.py", "line": 170, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(imdir):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 21, "name": "execfile", "kind": "def", "category": "function", "info": "def execfile(filename, globals=None, locals=None):\n with open(filename, \"rb\") as fp:\n exec(compile(fp.read(), filename, 'exec'), globals, locals)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 133, "name": "create_thumbnail", "kind": "def", "category": "function", "info": "def create_thumbnail(infile, thumbfile,\n width=275, height=275,\n cx=0.5, cy=0.5, border=4):\n baseout, extout = op.splitext(thumbfile)\n\n im = matplotlib.image.imread(infile)\n rows, cols = im.shape[:2]\n x0 = int(cx * cols - .5 * width)\n y0 = int(cy * rows - .5 * height)\n xslice = slice(x0, x0 + width)\n yslice = slice(y0, y0 + height)\n thumb = im[yslice, xslice]\n thumb[:border, :, :3] = thumb[-border:, :, :3] = 0\n thumb[:, :border, :3] = thumb[:, -border:, :3] = 0\n\n dpi = 100\n fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)\n\n ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n frameon=False, xticks=[], yticks=[])\n if all(thumb.shape):\n ax.imshow(thumb, aspect='auto', resample=True,\n interpolation='bilinear')\n else:\n warnings.warn(\n f\"Bad thumbnail crop. 
{thumbfile} will be empty.\"\n )\n fig.savefig(thumbfile, dpi=dpi)\n return fig\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 151, "name": "add_axes", "kind": "ref", "category": "function", "info": " ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 164, "name": "indent", "kind": "def", "category": "function", "info": "def indent(s, N=4):\n \"\"\"indent a string\"\"\"\n return s.replace('\\n', '\\n' + N * ' ')\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 169, "name": "ExampleGenerator", "kind": "def", "category": "class", "info": "__init__\tdirname\tfname\tmodulename\tpyfilename\trstfilename\thtmlfilename\tpngfilename\tthumbfilename\tsphinxtag\tpagetitle\tplotfunc\tcomponents\textract_docstring\texec_file\ttoctree_entry\tcontents_entry"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 175, "name": "extract_docstring", "kind": "ref", "category": "function", "info": " self.extract_docstring()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 185, "name": "exec_file", "kind": "ref", "category": "function", "info": " self.exec_file()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 194, "name": "fname", "kind": "def", "category": "function", "info": " def fname(self):\n return op.split(self.filename)[1]\n\n @property\n def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = 
tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 198, "name": "modulename", "kind": "def", "category": "function", "info": " def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 202, "name": "pyfilename", "kind": "def", "category": "function", "info": " def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 206, "name": "rstfilename", "kind": "def", "category": "function", "info": " def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 210, "name": "htmlfilename", "kind": "def", "category": "function", "info": " def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 214, "name": "pngfilename", "kind": "def", "category": "function", "info": " def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 219, "name": "thumbfilename", "kind": "def", "category": "function", "info": " def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 224, "name": "sphinxtag", "kind": "def", "category": "function", "info": " def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 228, "name": "pagetitle", "kind": "def", "category": "function", "info": " def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 232, "name": "plotfunc", "kind": "def", "category": "function", "info": " def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 245, "name": "components", "kind": "def", "category": "function", "info": " def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 257, "name": "extract_docstring", "kind": "def", "category": "function", "info": " def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 260, "name": "readlines", "kind": "ref", "category": "function", "info": " lines = open(self.filename).readlines()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 300, "name": "exec_file", "kind": "def", "category": "function", "info": " def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 306, "name": "execfile", "kind": "ref", "category": "function", "info": " execfile(self.filename, my_globals)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 316, "name": "create_thumbnail", "kind": "ref", "category": "function", "info": " create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 318, "name": "toctree_entry", "kind": "def", "category": "function", "info": " def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 321, "name": "contents_entry", "kind": "def", "category": "function", "info": " def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \"
\\n\"\n \" \\n\"\n \" \\n\"\n \" \\n\"\n \"

{}

\\n\"\n \"
\\n\"\n \"
\\n\"\n \"
\\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 368, "name": "ExampleGenerator", "kind": "ref", "category": "function", "info": " ex = ExampleGenerator(filename, target_dir)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 383, "name": "toctree_entry", "kind": "ref", "category": "function", "info": " toctree += ex.toctree_entry()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 384, "name": "contents_entry", "kind": "ref", "category": "function", "info": " contents += ex.contents_entry()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 397, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect('builder-inited', main)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 9, "name": "line_type", "kind": "def", "category": "function", "info": "def line_type(line):\n\n if line.startswith(\" \"):\n return \"code\"\n else:\n return \"markdown\"\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 17, "name": "add_cell", "kind": "def", "category": "function", "info": "def add_cell(nb, lines, cell_type):\n\n cell_objs = {\n \"code\": nbformat.v4.new_code_cell,\n \"markdown\": nbformat.v4.new_markdown_cell,\n }\n text = \"\\n\".join(lines)\n cell = cell_objs[cell_type](text)\n nb[\"cells\"].append(cell)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 36, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " lines = NumpyDocString(pydoc.getdoc(obj))[\"Examples\"]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 41, "name": "new_notebook", "kind": "ref", "category": "function", "info": " nb = nbformat.v4.new_notebook()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 57, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) != cell_type:\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 60, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 61, "name": "line_type", "kind": "ref", "category": "function", "info": " cell_type = line_type(line)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 
64, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) == \"code\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 70, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 72, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f\"docstrings/{name}.ipynb\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 14, "name": "poisson_disc_sample", "kind": "def", "category": "function", "info": "def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):\n \"\"\"Find positions using poisson-disc sampling.\"\"\"\n # See http://bost.ocks.org/mike/algorithms/\n rng = np.random.default_rng(seed)\n uniform = rng.uniform\n randint = rng.integers\n\n # Cache the results\n key = array_radius, pad_radius, seed\n if key in XY_CACHE:\n return XY_CACHE[key]\n\n # Start at a fixed point we know will work\n start = np.zeros(d)\n samples = [start]\n queue = [start]\n\n while queue:\n\n # Pick a sample to expand from\n s_idx = randint(len(queue))\n s = queue[s_idx]\n\n for i in range(candidates):\n # Generate a candidate from this sample\n coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n\n # Check the three conditions to accept the candidate\n in_array = np.sqrt(np.sum(coords ** 2)) < array_radius\n in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)\n\n if in_array and in_ring:\n # Accept the candidate\n samples.append(coords)\n queue.append(coords)\n break\n\n if (i + 1) == candidates:\n # We've exhausted the particular sample\n queue.pop(s_idx)\n\n samples = np.array(samples)\n XY_CACHE[key] = samples\n return samples\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 17, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 34, "name": "randint", "kind": "ref", "category": "function", "info": " s_idx = randint(len(queue))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 39, "name": "uniform", "kind": "ref", "category": "function", "info": " coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 60, "name": "logo", "kind": "def", "category": "function", "info": "def logo(\n ax,\n color_kws, ring, ring_idx, edge,\n pdf_means, pdf_sigma, dy, y0, w, h,\n hist_mean, hist_sigma, hist_y0, lw, skip,\n scatter, pad, scale,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 70, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 71, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect('equal')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 79, "name": "gaussian", "kind": "ref", "category": "function", "info": " y = gaussian(x.size, pdf_sigma)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 97, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 104, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(bg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 115, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(wedge)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 120, "name": "gaussian", "kind": "ref", "category": "function", "info": " hist_y = gaussian(x.size, hist_sigma)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 133, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " bar.set_clip_path(fg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 138, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " u.set_clip_path(fg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 143, "name": "poisson_disc_sample", "kind": "ref", "category": "function", "info": " xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 153, "name": "get_paths", "kind": "ref", "category": "function", "info": " path = u.get_paths()[0]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "get_transform", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 155, "name": "set_visible", "kind": "ref", "category": "function", "info": " u.set_visible(False)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 182, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " color = sns.cubehelix_palette(**kwargs[\"color_kws\"])[color_idx]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 187, "name": "logo", "kind": "ref", "category": "function", "info": " logo(ax, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 194, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 204, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 212, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 222, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 40, "name": "MetadataError", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 44, "name": "pop_recursive", "kind": "def", "category": "function", "info": "def pop_recursive(d, key, default=None):\n \"\"\"dict.pop(key) where `key` is a `.`-delimited list of nested keys.\n >>> d = {'a': {'b': 1, 'c': 2}}\n >>> pop_recursive(d, 'a.c')\n 2\n >>> d\n {'a': {'b': 1}}\n \"\"\"\n nested = key.split('.')\n current = d\n for k in nested[:-1]:\n if hasattr(current, 'get'):\n current = current.get(k, {})\n else:\n return default\n if not hasattr(current, 'pop'):\n return default\n return current.pop(nested[-1], default)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 64, "name": "strip_output", "kind": "def", "category": "function", "info": "def strip_output(nb):\n \"\"\"\n Strip the outputs, execution count/prompt number and miscellaneous\n metadata from a notebook object, unless specified to keep either the\n outputs or counts.\n \"\"\"\n keys = {'metadata': [], 'cell': {'metadata': [\"execution\"]}}\n\n nb.metadata.pop('signature', None)\n nb.metadata.pop('widgets', None)\n\n for field in keys['metadata']:\n pop_recursive(nb.metadata, field)\n\n for cell in nb.cells:\n\n # Remove the outputs, unless directed otherwise\n if 'outputs' in cell:\n\n cell['outputs'] = []\n\n # Remove the prompt_number/execution_count, unless directed otherwise\n if 'prompt_number' in cell:\n cell['prompt_number'] = None\n if 'execution_count' in cell:\n cell['execution_count'] = None\n\n # Always remove this metadata\n for output_style in ['collapsed', 'scrolled']:\n if output_style in cell.metadata:\n cell.metadata[output_style] = False\n 
if 'metadata' in cell:\n for field in ['collapsed', 'scrolled', 'ExecuteTime']:\n cell.metadata.pop(field, None)\n for (extra, fields) in keys['cell'].items():\n if extra in cell:\n for field in fields:\n pop_recursive(getattr(cell, extra), field)\n return nb\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 76, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(nb.metadata, field)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 101, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(getattr(cell, extra), field)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 121, "name": "ExecutePreprocessor", "kind": "ref", "category": "function", "info": " ep = ExecutePreprocessor(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 126, "name": "preprocess", "kind": "ref", "category": "function", "info": " ep.preprocess(nb, {\"metadata\": {\"path\": basedir}})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 143, "name": "RSTExporter", "kind": "ref", "category": "function", "info": " exp = RSTExporter()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "TagRemovePreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 153, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 153, "name": "ExtractOutputPreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 155, "name": "from_notebook_node", "kind": "ref", "category": "function", "info": " body, resources = exp.from_notebook_node(nb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 159, "name": "strip_output", "kind": "ref", "category": "function", "info": " nb = strip_output(nb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 171, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(imdir):\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 15, "name": "read", "kind": "ref", "category": "function", "info": " nb = nbformat.read(f, as_version=4)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 21, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "sns.lmplot(x=\"x\", y=\"y\", col=\"dataset\", hue=\"dataset\", data=df,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 12, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f, left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 19, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=\"carat\", y=\"price\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "fmri = sns.load_dataset(\"fmri\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 14, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(x=\"timepoint\", y=\"signal\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": 
"sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 10, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "dots = sns.load_dataset(\"dots\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 13, "name": "color_palette", "kind": "ref", "category": "function", "info": "palette = sns.color_palette(\"rocket_r\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 16, "name": "relplot", "kind": "ref", "category": "function", "info": "sns.relplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 12, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 18, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Body mass (g)\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 19, "name": "set_title", "kind": "ref", "category": "function", "info": "g.legend.set_title(\"\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\", palette=\"pastel\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_boxplot.py", "rel_fname": 
"examples/grouped_boxplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 14, "name": "boxplot", "kind": "ref", "category": "function", "info": "sns.boxplot(x=\"day\", y=\"total_bill\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(offset=10, trim=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 13, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=tips, x=\"day\", y=\"total_bill\", hue=\"smoker\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": " .get_level_values(\"network\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "corr", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "stack", "kind": "ref", "category": "function", 
"info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "reset_index", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 26, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 35, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 36, "name": "margins", "kind": "ref", "category": "function", "info": "g.ax.margins(.02)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 37, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": "for label in g.ax.get_xticklabels():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 38, "name": "set_rotation", "kind": "ref", "category": "function", "info": " label.set_rotation(90)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 40, "name": "set_edgecolor", "kind": "ref", "category": "function", "info": " artist.set_edgecolor(\".7\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(11)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 11, "name": "gamma", "kind": "ref", "category": "function", "info": "x = rs.gamma(2, size=1000)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = -.5 * x + rs.normal(size=1000)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 14, "name": "jointplot", "kind": "ref", "category": "function", "info": "sns.jointplot(x=x, y=y, kind=\"hex\", color=\"#4CB391\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/histogram_stacked.py", "rel_fname": 
"examples/histogram_stacked.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 18, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 27, "name": "set_major_formatter", "kind": "ref", "category": "function", "info": "ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 28, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks([500, 1000, 2000, 5000, 10000])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 13, "name": "set_xscale", "kind": "ref", "category": "function", "info": "ax.set_xscale(\"log\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 16, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 23, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(x=\"distance\", y=\"method\", data=planets,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(trim=True, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 20, "name": "stripplot", 
"kind": "ref", "category": "function", "info": "sns.stripplot(x=\"value\", y=\"measurement\", hue=\"species\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 26, "name": "pointplot", "kind": "ref", "category": "function", "info": "sns.pointplot(x=\"value\", y=\"measurement\", hue=\"species\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 32, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": "handles, labels = ax.get_legend_handles_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 12, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=planets, x=\"year\", y=\"distance\", marginal_ticks=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 18, "name": "add_axes", "kind": "ref", "category": "function", "info": "cax = g.figure.add_axes([.15, .55, .02, .2])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 21, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 25, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, element=\"step\", color=\"#03012d\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 13, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", rc={\"axes.facecolor\": (0, 0, 0, 0)})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 13, "name": "RandomState", 
"kind": "ref", "category": "function", "info": "rs = np.random.RandomState(1979)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 14, "name": "randn", "kind": "ref", "category": "function", "info": "x = rs.randn(500)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 21, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "pal = sns.cubehelix_palette(10, rot=-.25, light=.7)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 22, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, row=\"g\", hue=\"g\", aspect=15, height=.5, palette=pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 31, "name": "refline", "kind": "ref", "category": "function", "info": "g.refline(y=0, linewidth=2, linestyle=\"-\", color=None, clip_on=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 35, "name": "label", "kind": "def", "category": "function", "info": "def label(x, color, label):\n ax = plt.gca()\n ax.text(0, .2, label, fontweight=\"bold\", color=color,\n ha=\"left\", va=\"center\", transform=ax.transAxes)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 47, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 49, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(bottom=True, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 11, "name": "boxenplot", "kind": "ref", "category": "function", "info": "sns.boxenplot(x=\"clarity\", y=\"carat\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rng = np.random.RandomState(0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/layered_bivariate_plot.py", 
"rel_fname": "examples/layered_bivariate_plot.py", "line": 16, "name": "multivariate_normal", "kind": "ref", "category": "function", "info": "x, y = rng.multivariate_normal(mean, cov, n).T\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 20, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=x, y=y, s=5, color=\".15\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 21, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(x=x, y=y, bins=50, pthresh=.1, cmap=\"mako\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 22, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(x=x, y=y, levels=5, color=\"w\", linewidths=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 16, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(x=\"age\", y=\"survived\", col=\"sex\", hue=\"sex\", data=df,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(4)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 16, "name": "randint", "kind": "ref", "category": "function", "info": "pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 24, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "grid = sns.FacetGrid(df, col=\"walk\", hue=\"walk\", palette=\"tab20c\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 28, "name": "refline", "kind": "ref", "category": "function", "info": "grid.refline(y=0, linestyle=\":\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(33)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 16, "name": "normal", "kind": "ref", "category": "function", "info": "d = pd.DataFrame(data=rs.normal(size=(100, 26)),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 20, "name": "corr", "kind": "ref", "category": "function", "info": "corr = d.corr()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 29, "name": "diverging_palette", "kind": "ref", "category": "function", "info": "cmap = sns.diverging_palette(230, 20, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 32, "name": "heatmap", "kind": "ref", "category": "function", "info": "sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", color_codes=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=mpg, x=\"mpg\", y=\"acceleration\", space=0, ratio=17)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.scatterplot, size=mpg[\"horsepower\"], sizes=(30, 120),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 14, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.rugplot, height=1, color=\"g\", alpha=.6)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": 
"examples/multiple_bivariate_kde.py", "line": 14, "name": "set_aspect", "kind": "ref", "category": "function", "info": "ax.set_aspect(\"equal\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 17, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 18, "name": "query", "kind": "ref", "category": "function", "info": " data=iris.query(\"species != 'versicolor'\"),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 13, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 11, "name": "blend_palette", "kind": "ref", "category": "function", "info": "cmap = sns.blend_palette(colors, input=\"husl\", as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 12, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 20, "name": "set_axis_labels", "kind": "ref", "category": "function", 
"info": "g.set_axis_labels(\"Snoot length (mm)\", \"Snoot depth (mm)\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 11, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(df, diag_sharey=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 12, "name": "map_upper", "kind": "ref", "category": "function", "info": "g.map_upper(sns.scatterplot, s=15)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 13, "name": "map_lower", "kind": "ref", "category": "function", "info": "g.map_lower(sns.kdeplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 14, "name": "map_diag", "kind": "ref", "category": "function", "info": "g.map_diag(sns.kdeplot, lw=2)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "titanic = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 12, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(titanic, y_vars=\"survived\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 19, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(fig=g.fig, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = 
sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 34, "name": "grid", "kind": "ref", "category": "function", "info": " ax.xaxis.grid(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 35, "name": "grid", "kind": "ref", "category": "function", "info": " ax.yaxis.grid(True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 37, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", context=\"talk\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 9, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(8)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y1, palette=\"rocket\", ax=ax1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 19, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax1.set_ylabel(\"Sequential\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 23, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y2, palette=\"vlag\", ax=ax2)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 25, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax2.set_ylabel(\"Diverging\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 28, "name": "choice", "kind": "ref", "category": "function", "info": "y3 = rs.choice(y1, len(y1), replace=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 29, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y3, palette=\"deep\", ax=ax3)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 31, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax3.set_ylabel(\"Qualitative\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 34, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 11, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(50)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 20, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " cmap = sns.cubehelix_palette(start=s, light=1, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 23, "name": "normal", "kind": "ref", "category": "function", "info": " x, y = rs.normal(size=(2, 50))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 24, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 31, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 16, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"pastel\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"total\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 21, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"muted\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 22, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"alcohol\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"exercise\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 13, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(x=\"time\", y=\"pulse\", hue=\"kind\", col=\"diet\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 21, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, col=\"speed\", hue=\"speed\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 10, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(x=\"total_bill\", y=\"tip\", data=tips,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(7)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/residplot.py", "rel_fname": 
"examples/residplot.py", "line": 11, "name": "normal", "kind": "ref", "category": "function", "info": "x = rs.normal(2, 1, 75)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = 2 + 1.5 * x + rs.normal(0, 2, 75)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 15, "name": "residplot", "kind": "ref", "category": "function", "info": "sns.residplot(x=x, y=y, lowess=True, color=\"g\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 14, "name": "relplot", "kind": "ref", "category": "function", "info": "sns.relplot(x=\"horsepower\", y=\"mpg\", hue=\"origin\", size=\"weight\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\", palette=\"muted\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 14, "name": "swarmplot", "kind": "ref", "category": "function", "info": "ax = sns.swarmplot(data=df, x=\"body_mass_g\", y=\"sex\", hue=\"species\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 10, "name": "pairplot", "kind": "ref", "category": "function", "info": "sns.pairplot(df, hue=\"species\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": 
"examples/scatterplot_sizes.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 13, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "cmap = sns.cubehelix_palette(rot=-.2, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 14, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 21, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.xaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 22, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.yaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 23, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 11, "name": "default_rng", "kind": "ref", "category": "function", "info": "rs = np.random.default_rng(0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 13, "name": "normal", "kind": "ref", "category": "function", "info": "d = rs.normal(0, 2, (n, p))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 17, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=d, palette=\"light:g\", inner=\"points\", orient=\"h\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=df, x=\"body_mass_g\", y=\"bill_depth_mm\", space=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/smooth_bivariate_kde.py", 
"rel_fname": "examples/smooth_bivariate_kde.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.kdeplot,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 15, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, color=\"#03051A\", alpha=1, bins=25)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights_long = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 11, "name": "pivot", "kind": "ref", "category": "function", "info": "flights = flights_long.pivot(\"month\", \"year\", \"passengers\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 15, "name": "heatmap", "kind": "ref", "category": "function", "info": "sns.heatmap(flights, annot=True, fmt=\"d\", linewidths=.5, ax=ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 21, "name": "husl_palette", "kind": "ref", "category": "function", "info": "network_pal = sns.husl_palette(8, s=.45)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 25, "name": "get_level_values", "kind": "ref", "category": "function", "info": "networks = df.columns.get_level_values(\"network\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "clustermap", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "corr", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 11, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 13, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 24, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(.8, .85, year, transform=ax.transAxes, fontweight=\"bold\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 27, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "get_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 36, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 37, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Passengers\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 38, "name": "tight_layout", "kind": "ref", "category": "function", "info": "g.tight_layout()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 12, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(365)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 13, "name": "randn", "kind": "ref", "category": "function", "info": "values = rs.randn(365, 4).cumsum(axis=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 16, "name": "rolling", "kind": "ref", "category": "function", "info": "data = data.rolling(7).mean()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 18, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(data=data, palette=\"tab10\", linewidth=2.5)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "corr", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "groupby", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "mean", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 22, "name": "astype", "kind": "ref", "category": "function", "info": "corr_df.index = corr_df.index.astype(int)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 23, "name": "sort_index", "kind": "ref", "category": "function", "info": "corr_df = corr_df.sort_index().T\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 33, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 5, "name": "MarkerStyle", "kind": "def", "category": "function", "info": "def MarkerStyle(marker=None, fillstyle=None):\n \"\"\"\n Allow MarkerStyle to accept a MarkerStyle object as parameter.\n\n Supports matplotlib < 3.3.0\n https://github.com/matplotlib/matplotlib/pull/16692\n\n \"\"\"\n if isinstance(marker, mpl.markers.MarkerStyle):\n if fillstyle is None:\n return marker\n else:\n marker = marker.get_marker()\n return mpl.markers.MarkerStyle(marker, fillstyle)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 17, "name": "get_marker", "kind": "ref", "category": "function", "info": " marker = marker.get_marker()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 18, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return mpl.markers.MarkerStyle(marker, fillstyle)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 21, "name": "norm_from_scale", "kind": "def", "category": "function", "info": "def norm_from_scale(scale, norm):\n \"\"\"Produce a Normalize object given a Scale and min/max domain limits.\"\"\"\n # This is an internal maplotlib function that simplifies things to access\n # It is likely to become part of the matplotlib API at some point:\n # https://github.com/matplotlib/matplotlib/issues/20329\n if isinstance(norm, mpl.colors.Normalize):\n return norm\n\n if scale is None:\n return None\n\n if norm is None:\n vmin = vmax = None\n else:\n vmin, vmax = norm # TODO more helpful error if this fails?\n\n class ScaledNorm(mpl.colors.Normalize):\n\n def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if 
self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # ***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 37, "name": "ScaledNorm", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 39, "name": "__call__", "kind": "def", "category": "function", "info": " def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # ***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 42, "name": "process_value", "kind": "ref", "category": "function", "info": " value, is_scalar = self.process_value(value)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 43, "name": "autoscale_None", "kind": "ref", "category": "function", "info": " self.autoscale_None(value)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 53, "name": "transform", "kind": "ref", "category": "function", "info": " t_value = self.transform(value).reshape(np.shape(value))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 54, "name": "transform", "kind": "ref", "category": "function", "info": " t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 60, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " t_value = np.ma.masked_invalid(t_value, 
copy=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 63, "name": "ScaledNorm", "kind": "ref", "category": "function", "info": " new_norm = ScaledNorm(vmin, vmax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 64, "name": "get_transform", "kind": "ref", "category": "function", "info": " new_norm.transform = scale.get_transform().transform\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 69, "name": "scale_factory", "kind": "def", "category": "function", "info": "def scale_factory(scale, axis, **kwargs):\n \"\"\"\n Backwards compatability for creation of independent scales.\n\n Matplotlib scales require an Axis object for instantiation on < 3.4.\n But the axis is not used, aside from extraction of the axis_name in LogScale.\n\n \"\"\"\n modify_transform = False\n if Version(mpl.__version__) < Version(\"3.4\"):\n if axis[0] in \"xy\":\n modify_transform = True\n axis = axis[0]\n base = kwargs.pop(\"base\", None)\n if base is not None:\n kwargs[f\"base{axis}\"] = base\n nonpos = kwargs.pop(\"nonpositive\", None)\n if nonpos is not None:\n kwargs[f\"nonpos{axis}\"] = nonpos\n\n if isinstance(scale, str):\n class Axis:\n axis_name = axis\n axis = Axis()\n\n scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n\n if modify_transform:\n transform = scale.get_transform()\n transform.base = kwargs.get(\"base\", 10)\n if kwargs.get(\"nonpositive\") == \"mask\":\n # Setting a private attribute, but we only get here\n # on an old matplotlib, so this won't break going forwards\n transform._clip = False\n\n return scale\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 90, "name": "Axis", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 92, "name": "Axis", "kind": "ref", "category": "function", "info": " axis = Axis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 94, "name": "scale_factory", "kind": "ref", "category": "function", "info": " scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 97, "name": "get_transform", "kind": "ref", "category": "function", "info": " transform = scale.get_transform()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 107, "name": "set_scale_obj", "kind": "def", "category": "function", "info": "def set_scale_obj(ax, axis, scale):\n \"\"\"Handle backwards compatability with 
setting matplotlib scale.\"\"\"\n if Version(mpl.__version__) < Version(\"3.4\"):\n # The ability to pass a BaseScale instance to Axes.set_{}scale was added\n # to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089\n # Workaround: use the scale name, which is restrictive only if the user\n # wants to define a custom scale; they'll need to update the registry too.\n if scale.name is None:\n # Hack to support our custom Formatter-less CatScale\n return\n method = getattr(ax, f\"set_{axis}scale\")\n kws = {}\n if scale.name == \"function\":\n trans = scale.get_transform()\n kws[\"functions\"] = (trans._forward, trans._inverse)\n method(scale.name, **kws)\n else:\n ax.set(**{f\"{axis}scale\": scale})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 120, "name": "get_transform", "kind": "ref", "category": "function", "info": " trans = scale.get_transform()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 122, "name": "method", "kind": "ref", "category": "function", "info": " method(scale.name, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 16, "name": "PlotData", "kind": "def", "category": "class", "info": "__init__\t__contains__\tjoin\t_assign_variables"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 56, "name": "_assign_variables", "kind": "ref", "category": "function", "info": " frame, names, ids = self._assign_variables(data, variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 92, "name": "PlotData", "kind": "ref", "category": "function", "info": " new = PlotData(data, variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 120, "name": "_assign_variables", "kind": "def", "category": "function", "info": " def _assign_variables(\n self,\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataFrame, dict[str, str | None], dict[str, str | int]]:\n \"\"\"\n Assign values for plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data\n Input data where variable names map to vector values.\n variables\n Keys are names of plot variables (x, y, ...) each value is one of:\n\n - name of a column (or index level, or dictionary entry) in `data`\n - vector in any format that can construct a :class:`pandas.DataFrame`\n\n Returns\n -------\n frame\n Table mapping seaborn variables (x, y, color, ...) 
to data vectors.\n names\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n ids\n Like the `names` dict, but `None` values are replaced by the `id()`\n of the data object that defined the variable.\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in `data`, or when they are\n non-indexed vector datatypes that have a different length from `data`.\n\n \"\"\"\n source_data: dict | DataFrame\n frame: DataFrame\n names: dict[str, str | None]\n ids: dict[str, str | int]\n\n plot_data = {}\n names = {}\n ids = {}\n\n given_data = data is not None\n if given_data:\n source_data = data\n else:\n # Data is optional; all variables can be defined as vectors\n # But simplify downstream code by always having a usable source data object\n source_data = {}\n\n # TODO Generally interested in accepting a generic DataFrame interface\n # Track https://data-apis.org/ for development\n\n # Variables can also be extracted from the index of a DataFrame\n if isinstance(source_data, pd.DataFrame):\n index = source_data.index.to_frame().to_dict(\"series\")\n else:\n index = {}\n\n for key, val in variables.items():\n\n # Simply ignore variables with no specification\n if val is None:\n continue\n\n # Try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow other hashables when\n # taking from the main data object. Allow only strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n\n # TODO this will be rendered unnecessary by the following pandas fix:\n # https://github.com/pandas-dev/pandas/pull/41283\n try:\n hash(val)\n val_is_hashable = True\n except TypeError:\n val_is_hashable = False\n\n val_as_data_key = (\n # See https://github.com/pandas-dev/pandas/pull/41283\n # (isinstance(val, abc.Hashable) and val in source_data)\n (val_is_hashable and val in source_data)\n or (isinstance(val, str) and val in index)\n )\n\n if val_as_data_key:\n\n if val in source_data:\n plot_data[key] = source_data[val]\n elif val in index:\n plot_data[key] = index[val]\n names[key] = ids[key] = str(val)\n\n elif isinstance(val, str):\n\n # This looks like a column name but, lookup failed.\n\n err = f\"Could not interpret value `{val}` for `{key}`. \"\n if not given_data:\n err += \"Value is a string, but `data` was not passed.\"\n else:\n err += \"An entry with this name does not appear in `data`.\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value somehow represents data\n\n # Ignore empty data structures\n if isinstance(val, abc.Sized) and len(val) == 0:\n continue\n\n # If vector has no index, it must match length of data table\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if isinstance(val, abc.Sized) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the original name using pandas-like metadata\n if hasattr(val, \"name\"):\n names[key] = ids[key] = str(val.name) # type: ignore # mypy/1424\n else:\n names[key] = None\n ids[key] = id(val)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n # TODO Note: this fails when variable specs *only* have scalars!\n frame = pd.DataFrame(plot_data)\n\n return frame, names, ids\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 178, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = source_data.index.to_frame().to_dict(\"series\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 13, "name": "GroupBy", "kind": "def", "category": "class", "info": "__init__\t_get_groups\t_reorder_columns\tagg\tapply"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 46, "name": "_get_groups", "kind": "def", "category": "function", "info": " def _get_groups(self, data: DataFrame) -> MultiIndex:\n \"\"\"Return index with Cartesian product of ordered grouping variable levels.\"\"\"\n levels = {}\n for var, order in self.order.items():\n if var in data:\n if order is None:\n order = categorical_order(data[var])\n levels[var] = order\n\n grouper: str | list[str]\n groups: Index | MultiIndex | None\n if not levels:\n grouper = []\n groups = None\n elif len(levels) > 1:\n grouper = list(levels)\n groups = pd.MultiIndex.from_product(levels.values(), names=grouper)\n else:\n grouper, = list(levels)\n groups = pd.Index(levels[grouper], name=grouper)\n return grouper, groups\n\n def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n group_ids = dict(zip(grouper, key))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 52, "name": "categorical_order", "kind": "ref", "category": "function", 
"info": " order = categorical_order(data[var])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 68, "name": "_reorder_columns", "kind": "def", "category": "function", "info": " def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n group_ids = dict(zip(grouper, key))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 83, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 105, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 108, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 108, "name": "func", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 112, "name": "func", "kind": "ref", "category": "function", "info": " parts[key] = func(part_df, *args, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 123, "name": "_reorder_columns", "kind": "ref", "category": 
"function", "info": " return self._reorder_columns(res, data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 11, "name": "Move", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 20, "name": "Jitter", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 40, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(self.seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 42, "name": "jitter", "kind": "def", "category": "function", "info": " def jitter(data, col, scale):\n noise = rng.uniform(-.5, +.5, len(data))\n offsets = noise * scale\n return data[col] + offsets\n\n if self.width:\n data[orient] = jitter(data, orient, self.width * data[\"width\"])\n if self.x:\n data[\"x\"] = jitter(data, \"x\", self.x)\n if self.y:\n data[\"y\"] = jitter(data, \"y\", self.y)\n\n return data\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 43, "name": "uniform", "kind": "ref", "category": "function", "info": " noise = rng.uniform(-.5, +.5, len(data))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 48, "name": "jitter", "kind": "ref", "category": "function", "info": " data[orient] = jitter(data, orient, self.width * data[\"width\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 50, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"x\"] = jitter(data, \"x\", self.x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 52, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"y\"] = jitter(data, \"y\", self.y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 58, "name": "Dodge", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 77, "name": "groupby_pos", "kind": "def", "category": "function", "info": " def groupby_pos(s):\n grouper = [groups[v] for v in [orient, \"col\", \"row\"] if v in data]\n return s.groupby(grouper, sort=False, observed=True)\n\n def scale_widths(w):\n # TODO what value to fill missing widths??? 
Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 81, "name": "scale_widths", "kind": "def", "category": "function", "info": " def scale_widths(w):\n # TODO what value to fill missing widths??? Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 92, "name": "widths_to_offsets", "kind": "def", "category": "function", "info": " def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 95, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 96, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 116, "name": "Stack", "kind": "def", "category": "class", "info": "_stack\t__call__"}, {"fname": 
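Dodge, indexed above, rescales widths within each shared position and converts them to centered offsets. A sketch of that offset arithmetic on its own, with hypothetical values:

import pandas as pd

# Three marks sharing one x position, each width 0.8 before dodging.
w = pd.Series([0.8, 0.8, 0.8])

# scale_widths with no missing values: normalize so the group spans max(w).
new_w = w / w.sum() * w.max()

# widths_to_offsets: cumulative left edges, then center the whole group.
offsets = new_w.shift(1).fillna(0).cumsum() + (new_w - new_w.sum()) / 2
print(offsets.round(3).tolist())  # [-0.267, 0.0, 0.267]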
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 122, "name": "_stack", "kind": "def", "category": "function", "info": " def _stack(self, df, orient):\n\n # TODO should stack do something with ymin/ymax style marks?\n # Should there be an upstream conversion to baseline/height parameterization?\n\n if df[\"baseline\"].nunique() > 1:\n err = \"Stack move cannot be used when baselines are already heterogeneous\"\n raise RuntimeError(err)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n stacked_lengths = (df[other] - df[\"baseline\"]).dropna().cumsum()\n offsets = stacked_lengths.shift(1).fillna(0)\n\n df[other] = stacked_lengths\n df[\"baseline\"] = df[\"baseline\"] + offsets\n\n return df\n\n def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame:\n\n # TODO where to ensure that other semantic variables are sorted properly?\n # TODO why are we not using the passed in groupby here?\n groupers = [\"col\", \"row\", orient]\n return GroupBy(groupers).apply(data, self._stack, orient)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 145, "name": "GroupBy", "kind": "ref", "category": "function", "info": " return GroupBy(groupers).apply(data, self._stack, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 149, "name": "Shift", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 165, "name": "Norm", "kind": "def", "category": "class", "info": "_norm\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 177, "name": "_norm", "kind": "def", "category": "function", "info": " def _norm(self, df, var):\n\n if self.where is None:\n denom_data = df[var]\n else:\n denom_data = df.query(self.where)[var]\n df[var] = df[var] / denom_data.agg(self.func)\n\n if self.percent:\n df[var] = df[var] * 100\n\n return df\n\n def __call__(self, data: DataFrame, groupby: GroupBy, orient: str) -> DataFrame:\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return groupby.apply(data, self._norm, other)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 48, "name": "Layer", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 60, "name": "FacetSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 67, "name": "PairSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 78, "name": "build_plot_signature", "kind": "def", "category": "function", "info": "def build_plot_signature(cls):\n \"\"\"\n Decorator function for giving Plot a useful signature.\n\n Currently this mostly saves us some duplicated typing, but we would\n like eventually to have a way of registering new semantic properties,\n 
at which point dynamic signature generation would become more important.\n\n \"\"\"\n sig = inspect.signature(cls)\n params = [\n inspect.Parameter(\"args\", inspect.Parameter.VAR_POSITIONAL),\n inspect.Parameter(\"data\", inspect.Parameter.KEYWORD_ONLY, default=None)\n ]\n params.extend([\n inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)\n for name in PROPERTIES\n ])\n new_sig = sig.replace(parameters=params)\n cls.__signature__ = new_sig\n\n known_properties = textwrap.fill(\n \", \".join(PROPERTIES), 78, subsequent_indent=\" \" * 8,\n )\n\n if cls.__doc__ is not None: # support python -OO mode\n cls.__doc__ = cls.__doc__.format(known_properties=known_properties)\n\n return cls\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 110, "name": "Plot", "kind": "def", "category": "class", "info": "__init__\t_resolve_positionals\t__add__\t_repr_png_\t_clone\t_variables\ton\tadd\tpair\tfacet\tscale\tconfigure\ttheme\tsave\tplot\tshow"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 162, "name": "_resolve_positionals", "kind": "ref", "category": "function", "info": " data, variables = self._resolve_positionals(args, data, variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 169, "name": "PlotData", "kind": "ref", "category": "function", "info": " self._data = PlotData(data, variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 179, "name": "_resolve_positionals", "kind": "def", "category": "function", "info": " def _resolve_positionals(\n self,\n args: tuple[DataSource | VariableSpec, ...],\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataSource, dict[str, VariableSpec]]:\n \"\"\"Handle positional arguments, which may contain data / x / y.\"\"\"\n if len(args) > 3:\n err = \"Plot() accepts no more than 3 positional arguments (data, x, y).\"\n raise TypeError(err)\n\n # TODO need some clearer way to differentiate data / vector here\n # (There might be an abstract DataFrame class to use here?)\n if isinstance(args[0], (abc.Mapping, pd.DataFrame)):\n if data is not None:\n raise TypeError(\"`data` given by both name and position.\")\n data, args = args[0], args[1:]\n\n if len(args) == 2:\n x, y = args\n elif len(args) == 1:\n x, y = *args, None\n else:\n x = y = None\n\n for name, var in zip(\"yx\", (y, x)):\n if var is not None:\n if name in variables:\n raise TypeError(f\"`{name}` given by both name and position.\")\n # Keep coordinates at the front of the variables dict\n variables = {name: var, **variables}\n\n return data, variables\n\n def __add__(self, other):\n\n if isinstance(other, Mark) or isinstance(other, Stat):\n raise TypeError(\"Sorry, this isn't ggplot! 
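build_plot_signature above rewrites Plot's `__signature__` with one keyword-only parameter per known property. The same inspect-based pattern in miniature; PROPS here is a stand-in for seaborn's PROPERTIES mapping:

import inspect

PROPS = ["color", "alpha"]  # hypothetical stand-in for PROPERTIES

def with_signature(cls):
    params = [
        inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL),
        inspect.Parameter("data", inspect.Parameter.KEYWORD_ONLY, default=None),
    ]
    params += [
        inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)
        for name in PROPS
    ]
    cls.__signature__ = inspect.signature(cls).replace(parameters=params)
    return cls

@with_signature
class Demo:
    def __init__(self, *args, **kwargs):
        pass

print(inspect.signature(Demo))  # (*args, data=None, color=None, alpha=None)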
Perhaps try Plot.add?\")\n\n other_type = other.__class__.__name__\n raise TypeError(f\"Unsupported operand type(s) for +: 'Plot' and '{other_type}\")\n\n def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n new._scales.update(self._scales)\n\n new._subplot_spec.update(self._subplot_spec)\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._target = self._target\n\n return new\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Draw the plot into an existing Matplotlib object.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n \"\"\"\n # TODO alternate name: target?\n\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n stat: Stat | None = None,\n move: Move | list[Move] | None = None,\n *,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Define a layer of the visualization.\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`seaborn.objects.Mark`\n The visual representation of the data to use in this layer.\n stat : :class:`seaborn.objects.Stat`\n A transformation applied to the data before plotting.\n move : :class:`seaborn.objects.Move`\n Additional transformation(s) to handle over-plotting.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which affects how the stat is computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. 
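Plot.on, shown above, targets an existing Axes, Figure, or (matplotlib >= 3.4) SubFigure. An illustrative sketch, assuming the released `seaborn.objects` namespace matches this snapshot:

import matplotlib.pyplot as plt
import seaborn.objects as so
from seaborn import load_dataset

tips = load_dataset("tips")
fig = plt.figure()
left, right = fig.subfigures(1, 2)       # SubFigure targets need mpl >= 3.4

# Draw one plot into each subfigure; plot() compiles without pyplot display.
so.Plot(tips, x="total_bill").add(so.Dot()).on(left).plot()
so.Plot(tips, x="tip").add(so.Dot()).on(right).plot()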
When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the stat without scaling.\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n if stat is not None and not isinstance(stat, Stat):\n msg = f\"stat must be a Stat instance, not {type(stat)!r}.\"\n raise TypeError(msg)\n\n # TODO decide how to allow Mark to have default Stat/Move\n # if stat is None and hasattr(mark, \"default_stat\"):\n # stat = mark.default_stat()\n\n # TODO it doesn't work to supply scalars to variables, but that would be nice\n\n # TODO accept arbitrary variables defined by the stat (/move?) here\n # (but not in the Plot constructor)\n # Should stat variables ever go in the constructor, or just in the add call?\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries in by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - Unclear is how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. 
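Plot.add, whose body appears above, is the layering entry point: each call clones the spec and appends a layer dict of mark, stat, move, and layer-local variables. A usage sketch, assuming the `seaborn.objects` names (Dot, Line, Agg) available in released versions also apply to this snapshot:

import seaborn.objects as so
from seaborn import load_dataset

tips = load_dataset("tips")
p = (
    so.Plot(tips, x="day", y="total_bill")
    .add(so.Dot())                 # layer 1: individual observations
    .add(so.Line(), so.Agg())      # layer 2: aggregated trend via a Stat
)
p.show()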
Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabeled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. 
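Plot.pair above builds a subplot grid from sequences of x and/or y variables, keyed internally as x0, x1, ... Continuing the earlier sketch (same assumed `so` / `tips` names):

# One row of panels: tip against each of two x variables, wrapped at width 2.
p = so.Plot(tips, y="tip").pair(x=["total_bill", "size"], wrap=2).add(so.Dot())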
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
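Plot.facet above accepts `order` as a plain list only when a single dimension is set; with both col= and row=, a dict keyed by dimension is required, per the error raised in the body. Continuing the sketch:

# Explicit level order for each faceting dimension.
p = so.Plot(tips, x="total_bill").add(so.Dot()).facet(
    col="time", row="sex",
    order={"col": ["Lunch", "Dinner"], "row": ["Male", "Female"]},
)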
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 221, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n new._scales.update(self._scales)\n\n new._subplot_spec.update(self._subplot_spec)\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._target = self._target\n\n return new\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Draw the plot into an existing Matplotlib object.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. 
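Plot.scale above accepts the "magic" shorthand arguments its docstring lists, and Plot.configure controls figure geometry (note: `configure` is the name in this snapshot; later seaborn releases split it into other methods). A final sketch under the same assumptions:

p = (
    so.Plot(tips, x="total_bill", y="tip", color="size")
    .add(so.Dot())
    .scale(x="log", color="viridis")        # transform name / palette name
    .configure(figsize=(6, 4), sharex=True)
)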
Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n \"\"\"\n # TODO alternate name: target?\n\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n stat: Stat | None = None,\n move: Move | list[Move] | None = None,\n *,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Define a layer of the visualization.\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`seaborn.objects.Mark`\n The visual representation of the data to use in this layer.\n stat : :class:`seaborn.objects.Stat`\n A transformation applied to the data before plotting.\n move : :class:`seaborn.objects.Move`\n Additional transformation(s) to handle over-plotting.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which affects how the stat is computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the stat without scaling.\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n if stat is not None and not isinstance(stat, Stat):\n msg = f\"stat must be a Stat instance, not {type(stat)!r}.\"\n raise TypeError(msg)\n\n # TODO decide how to allow Mark to have default Stat/Move\n # if stat is None and hasattr(mark, \"default_stat\"):\n # stat = mark.default_stat()\n\n # TODO it doesn't work to supply scalars to variables, but that would be nice\n\n # TODO accept arbitrary variables defined by the stat (/move?) 
here\n # (but not in the Plot constructor)\n # Should stat variables ever go in the constructor, or just in the add call?\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries in by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - Unclear is how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. 
Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabeled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. 
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 223, "name": "_repr_png_", "kind": "ref", "category": "function", "info": " return self.plot()._repr_png_()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 227, "name": "_clone", "kind": "def", "category": "function", "info": " def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n new._scales.update(self._scales)\n\n new._subplot_spec.update(self._subplot_spec)\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._target = self._target\n\n return new\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Draw the plot into an existing Matplotlib object.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. 
Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n \"\"\"\n # TODO alternate name: target?\n\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n stat: Stat | None = None,\n move: Move | list[Move] | None = None,\n *,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Define a layer of the visualization.\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`seaborn.objects.Mark`\n The visual representation of the data to use in this layer.\n stat : :class:`seaborn.objects.Stat`\n A transformation applied to the data before plotting.\n move : :class:`seaborn.objects.Move`\n Additional transformation(s) to handle over-plotting.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which affects how the stat is computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the stat without scaling.\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n if stat is not None and not isinstance(stat, Stat):\n msg = f\"stat must be a Stat instance, not {type(stat)!r}.\"\n raise TypeError(msg)\n\n # TODO decide how to allow Mark to have default Stat/Move\n # if stat is None and hasattr(mark, \"default_stat\"):\n # stat = mark.default_stat()\n\n # TODO it doesn't work to supply scalars to variables, but that would be nice\n\n # TODO accept arbitrary variables defined by the stat (/move?) 
here\n # (but not in the Plot constructor)\n # Should stat variables ever go in the constructor, or just in the add call?\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries in by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - Unclear is how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. 
Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabeled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. 
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 229, "name": "Plot", "kind": "ref", "category": "function", "info": " new = Plot()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 246, "name": "_variables", "kind": "def", "category": "function", "info": " def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Draw the plot into an existing Matplotlib object.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. 
Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n \"\"\"\n # TODO alternate name: target?\n\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n stat: Stat | None = None,\n move: Move | list[Move] | None = None,\n *,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Define a layer of the visualization.\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`seaborn.objects.Mark`\n The visual representation of the data to use in this layer.\n stat : :class:`seaborn.objects.Stat`\n A transformation applied to the data before plotting.\n move : :class:`seaborn.objects.Move`\n Additional transformation(s) to handle over-plotting.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which affects how the stat is computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the stat without scaling.\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n if stat is not None and not isinstance(stat, Stat):\n msg = f\"stat must be a Stat instance, not {type(stat)!r}.\"\n raise TypeError(msg)\n\n # TODO decide how to allow Mark to have default Stat/Move\n # if stat is None and hasattr(mark, \"default_stat\"):\n # stat = mark.default_stat()\n\n # TODO it doesn't work to supply scalars to variables, but that would be nice\n\n # TODO accept arbitrary variables defined by the stat (/move?) 
here\n # (but not in the Plot constructor)\n # Should stat variables ever go in the constructor, or just in the add call?\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` is set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - It is unclear how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. 
Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `col` or `row` is set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list \"\n \"is ambiguous. 
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 257, "name": "on", "kind": "def", "category": "function", "info": " def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Draw the plot into an existing Matplotlib object.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n \"\"\"\n # TODO alternate name: target?\n\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n stat: Stat | None = None,\n move: Move | list[Move] | None = None,\n *,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Define a layer of the visualization.\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`seaborn.objects.Mark`\n The visual representation of the data to use in this layer.\n stat : :class:`seaborn.objects.Stat`\n A transformation applied to the data before plotting.\n move : :class:`seaborn.objects.Move`\n Additional transformation(s) to handle over-plotting.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which affects how the stat is computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the stat without scaling.\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n if stat is not None and not isinstance(stat, Stat):\n msg = f\"stat must be a Stat instance, not {type(stat)!r}.\"\n raise TypeError(msg)\n\n # TODO decide how to allow Mark to have default Stat/Move\n # if stat is None and hasattr(mark, \"default_stat\"):\n # stat = mark.default_stat()\n\n # TODO it doesn't work to supply scalars to variables, but that would be nice\n\n # TODO accept arbitrary variables defined by the stat (/move?) here\n # (but not in the Plot constructor)\n # Should stat variables ever go in the constructor, or just in the add call?\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. 
Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries in by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - Unclear is how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabeled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. 
Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 291, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 355, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 368, "name": "pair", "kind": "def", "category": "function", "info": " def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. 
Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries in by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - Unclear is how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabeled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. 
Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 458, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 462, "name": "facet", "kind": "def", "category": "function", "info": " def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. 
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 515, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 522, "name": "scale", "kind": "def", "category": "function", "info": " def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. 
Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? (or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", 
"rel_fname": "seaborn/_core/plot.py", "line": 541, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 545, "name": "configure", "kind": "def", "category": "function", "info": " def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? (or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: 
https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 569, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 583, "name": "theme", "kind": "def", "category": "function", "info": " def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? (or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 591, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 596, "name": "save", "kind": "def", "category": "function", "info": " def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def 
plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 608, "name": "save", "kind": "ref", "category": "function", "info": " self.plot().save(fname, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 619, "name": "Plotter", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 621, "name": "_extract_data", "kind": "ref", "category": "function", "info": " common, layers = plotter._extract_data(self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 622, "name": "_setup_figure", "kind": "ref", "category": "function", "info": " plotter._setup_figure(self, common, layers)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 623, "name": "_transform_coords", "kind": "ref", "category": "function", "info": " plotter._transform_coords(self, common, layers)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 625, "name": "_compute_stats", "kind": "ref", "category": "function", "info": " plotter._compute_stats(self, layers)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 626, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, layers)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 634, "name": "_plot_layer", "kind": "ref", "category": "function", "info": " plotter._plot_layer(self, layer)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", 
"line": 636, "name": "_make_legend", "kind": "ref", "category": "function", "info": " plotter._make_legend()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 661, "name": "Plotter", "kind": "def", "category": "class", "info": "__init__\tsave\tshow\t_repr_png_\t_extract_data\t_setup_figure\t_transform_coords\t_compute_stats\t_get_scale\t_setup_scales\t_plot_layer\t_scale_coords\t_unscale_coords\t_generate_pairings\t_get_subplot_index\t_filter_subplot_data\t_setup_split_generator\t_update_legend_contents\t_make_legend"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 681, "name": "save", "kind": "def", "category": "function", "info": " def save(self, loc, **kwargs) -> Plotter: # TODO type args\n kwargs.setdefault(\"dpi\", 96)\n try:\n loc = os.path.expanduser(loc)\n except TypeError:\n # loc may be a buffer in which case that would not work\n pass\n self._figure.savefig(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n # TODO if we did not create the Plotter with pyplot, is it possible to do this?\n # If not we should clearly raise.\n import matplotlib.pyplot as plt\n plt.show(**kwargs)\n\n # TODO API for accessing the underlying matplotlib objects\n # TODO what else is useful in the public API for this class?\n\n def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n # TODO better to do this through a Jupyter hook? e.g.\n # ipy = IPython.core.formatters.get_ipython()\n # fmt = ipy.display_formatter.formatters[\"text/html\"]\n # fmt.for_type(Plot, ...)\n # Would like to have a svg option too, not sure how to make that flexible\n\n # TODO use matplotlib backend directly instead of going through savefig?\n\n # TODO perhaps have self.show() flip a switch to disable this, so that\n # user does not end up with two versions of the figure in the output\n\n # TODO use bbox_inches=\"tight\" like the inline backend?\n # pro: better results, con: (sometimes) confusing results\n # Better solution would be to default (with option to change)\n # to using constrained/tight layout.\n\n # TODO need to decide what the right default behavior here is:\n # - Use dpi=72 to match default InlineBackend figure size?\n # - Accept a generic \"scaling\" somewhere and scale DPI from that,\n # either with 1x -> 72 or 1x -> 96 and the default scaling be .75?\n # - Listen to rcParams? 
InlineBackend behavior makes that so complicated :(\n # - Do we ever want to *not* use retina mode at this point?\n\n from PIL import Image\n\n dpi = 96\n buffer = io.BytesIO()\n self._figure.savefig(buffer, dpi=dpi * 2, format=\"png\", bbox_inches=\"tight\")\n data = buffer.getvalue()\n\n scaling = .85 / 2\n # w, h = self._figure.get_size_inches()\n w, h = Image.open(buffer).size\n metadata = {\"width\": w * scaling, \"height\": h * scaling}\n return data, metadata\n\n def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n # TODO use context manager with theme that has been set\n # TODO (maybe wrap THIS function with context manager; would be cleaner)\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n figure_kws = {\"figsize\": getattr(p, \"_figsize\", None)} # TODO fix\n self._figure = subplots.init_figure(\n pair_spec, self.pyplot, figure_kws, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n label = next((name for name in names if name is not None), None)\n ax.set(**{f\"{axis}label\": label})\n\n # TODO there should be some override (in Plot.configure?) so that\n # tick labels can be shown on interior shared axes\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or axis in p._pair_spec and bool(p._pair_spec.get(\"wrap\"))\n or not p._pair_spec.get(\"cross\", True)\n )\n axis_obj.get_label().set_visible(show_axis_label)\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO title template should be configurable\n # ---- Also we want right-side titles for row facets in most cases?\n # ---- Or wrapped? 
That can get annoying too.\n # TODO should configure() accept a title= kwarg (for single subplot plots)?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"row\", \"col\"]:\n if sub[dim] is not None:\n name = common.names.get(dim) # TODO None = val looks bad\n title_parts.append(f\"{name} = {sub[dim]}\")\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n\n def _transform_coords(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n for var in p._variables:\n\n # Parse name to identify variable (x, y, xmin, etc.) and axis (x/y)\n # TODO should we have xmin0/xmin1 or x0min/x1min?\n m = re.match(r\"^(?P<prefix>(?P<axis>[x|y])\\d*).*\", var)\n\n if m is None:\n continue\n\n prefix = m[\"prefix\"]\n axis = m[\"axis\"]\n\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n # TODO basically copied from _setup_scales, and very clumsy\n layer_values = [common.frame.filter(cols)]\n for layer in layers:\n if layer[\"data\"].frame is None:\n for df in layer[\"data\"].frames.values():\n layer_values.append(df.filter(cols))\n else:\n layer_values.append(layer[\"data\"].frame.filter(cols))\n\n if layer_values:\n var_df = pd.concat(layer_values, ignore_index=True)\n else:\n var_df = pd.DataFrame(columns=cols)\n\n prop = Coordinate(axis)\n scale_spec = self._get_scale(p, prefix, prop, var_df[var])\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec\n cat_scale = isinstance(scale_spec, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n # Now loop through each subplot, deriving the relevant seed data to setup\n # the scale (so that axis units / categories are initialized properly)\n # And then scale the data in each layer.\n subplots = [view for view in self._subplots if view[axis] == prefix]\n\n # Setup the scale on all of the data and plug it into self._scales\n # We do this because by the time we do self._setup_scales, coordinate data\n # will have been converted to floats already, so scale inference fails\n self._scales[var] = scale_spec.setup(var_df[var], prop)\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal tranforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n transformed_data.append(pd.Series(dtype=float, index=index, name=var))\n\n for view in subplots:\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = var_df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(var_df, view)\n elif share_state in var_df:\n # Sharing within row/col is more complicated\n use_rows = var_df[share_state] == view[share_state]\n idx = var_df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = var_df.index\n\n seed_values = var_df.loc[idx, var]\n\n scale = scale_spec.setup(seed_values, prop, axis=axis_obj)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = scale(layer_df.loc[idx, var])\n\n # TODO need decision about whether to do this or modify axis transform\n set_scale_obj(view[\"ax\"], axis, scale.matplotlib_scale)\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x 
in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> ScaleSpec:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, ScaleSpec):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:\n\n # Identify all of the variables that will be used at some point in the plot\n variables = set()\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n variables.update(df.columns)\n else:\n variables.update(layer[\"data\"].frame.columns)\n\n for var in variables:\n\n if var in self._scales:\n # Scales for coordinate variables added in _transform_coords\n continue\n\n # Get the data all the distinct appearances of this variable.\n parts = []\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n parts.append(df.get(var))\n else:\n parts.append(layer[\"data\"].frame.get(var))\n var_values = pd.concat(\n parts, axis=0, join=\"inner\", ignore_index=True\n ).rename(var)\n\n # Determine whether this is an coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<prefix>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n axis = None\n else:\n var = m[\"prefix\"]\n axis = m[\"axis\"]\n\n prop = PROPERTIES.get(var if axis is None else axis, Property())\n scale_spec = self._get_scale(p, var, prop, var_values)\n\n # Initialize the data-dependent parameters of the scale\n # Note that this returns a copy and does not mutate the original\n # This dictionary is used by the semantic mappings\n if scale_spec is None:\n # TODO what is the cleanest way to implement identity scale?\n # We don't really need a ScaleSpec, and Identity() will be\n # overloaded anyway (but maybe a general Identity object\n # that can be used as Scale/Mark/Stat/Move?)\n # Note that this may not be the right spacer to use\n # (but that is only relevant for coordinates, where identity scale\n # doesn't make sense or is poorly defined, since we don't use pixels.)\n self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n else:\n scale = scale_spec.setup(var_values, prop)\n if isinstance(prop, Coordinate):\n # If we have a coordinate here, we didn't assign a scale for it\n # in _transform_coords, which means it was added during compute_stat\n # This allows downstream orientation inference to work properly.\n # But it feels a little hacky, so perhaps revisit.\n scale.scale_type = \"computed\"\n self._scales[var] = scale\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = 
layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient 
and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += 
artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 684, "name": "expanduser", "kind": "ref", "category": "function", "info": " loc = os.path.expanduser(loc)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 700, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n new._scales.update(self._scales)\n\n new._subplot_spec.update(self._subplot_spec)\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._target = self._target\n\n return new\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Draw the plot into an existing Matplotlib object.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n \"\"\"\n # TODO alternate name: target?\n\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n stat: Stat | None = None,\n move: Move | list[Move] | None = None,\n *,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Define a layer of the visualization.\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`seaborn.objects.Mark`\n The visual representation of the data to use in this layer.\n stat : :class:`seaborn.objects.Stat`\n A transformation applied to the data before plotting.\n move : :class:`seaborn.objects.Move`\n Additional transformation(s) to handle over-plotting.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which affects how the stat is computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the stat without scaling.\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n if stat is not None and not isinstance(stat, Stat):\n msg = f\"stat must be a Stat instance, not {type(stat)!r}.\"\n raise TypeError(msg)\n\n # TODO decide how to allow Mark to have default Stat/Move\n # if stat is None and hasattr(mark, \"default_stat\"):\n # stat = mark.default_stat()\n\n # TODO it doesn't work to supply scalars to variables, but that would be nice\n\n # TODO accept arbitrary variables defined by the stat (/move?) here\n # (but not in the Plot constructor)\n # Should stat variables ever go in the constructor, or just in the add call?\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: list[Hashable] | Index[Hashable] | None = None,\n y: list[Hashable] | Index[Hashable] | None = None,\n wrap: int | None = None,\n cross: bool = True,\n # TODO other existing PairGrid things like corner?\n # TODO transpose, so that e.g. multiple y axes go across the columns\n ) -> Plot:\n \"\"\"\n Produce subplots with distinct `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data identifiers\n Variables that will define the grid of subplots.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. Requires that only one of `x` or `y` are set here.\n cross : bool\n When True, define a two-dimensional grid using the Cartesian product of `x`\n and `y`. 
Otherwise, define a one-dimensional grid by pairing `x` and `y`\n entries in by position.\n\n \"\"\"\n # TODO Problems to solve:\n #\n # - Unclear is how to handle the diagonal plots that PairGrid offers\n #\n # - Implementing this will require lots of downscale changes in figure setup,\n # and especially the axis scaling, which will need to be pair specific\n\n # TODO lists of vectors currently work, but I'm not sure where best to test\n # Will need to update the signature typing to keep them\n\n # TODO is it weird to call .pair() to create univariate plots?\n # i.e. Plot(data).pair(x=[...]). The basic logic is fine.\n # But maybe a different verb (e.g. Plot.spread) would be more clear?\n # Then Plot(data).pair(x=[...]) would show the given x vars vs all.\n\n # TODO would like to add transpose=True, which would then draw\n # Plot(x=...).pair(y=[...]) across the rows\n # This may also be possible by setting `wrap=1`, although currently the axes\n # are shared and the interior labels are disabeled (this is a bug either way)\n\n pair_spec: PairSpec = {}\n\n if x is None and y is None:\n\n # Default to using all columns in the input source data, aside from\n # those that were assigned to a variable in the constructor\n # TODO Do we want to allow additional filtering by variable type?\n # (Possibly even default to using only numeric columns)\n\n if self._data.source_data is None:\n err = \"You must pass `data` in the constructor to use default pairing.\"\n raise RuntimeError(err)\n\n all_unused_columns = [\n key for key in self._data.source_data\n if key not in self._data.names.values()\n ]\n if \"x\" not in self._data:\n x = all_unused_columns\n if \"y\" not in self._data:\n y = all_unused_columns\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n # TODO raise here if cross is False and len(x) != len(y)?\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n # TODO require kwargs?\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n Maximum height/width of the grid, with additional subplots \"wrapped\"\n on the other dimension. 
Requires that only one of `x` or `y` are set here.\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: ScaleSpec) -> Plot:\n \"\"\"\n Control mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n \"\"\"\n new = self._clone()\n new._scales.update(**scales)\n return new\n\n def configure(\n self,\n figsize: tuple[float, float] | None = None,\n sharex: bool | str | None = None,\n sharey: bool | str | None = None,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n Parameters\n ----------\n figsize: (width, height)\n Size of the resulting figure, in inches.\n sharex, sharey : bool, \"row\", or \"col\"\n Whether axis limits should be shared across subplots. Boolean values apply\n across the entire grid, whereas `\"row\"` or `\"col\"` have a smaller scope.\n Shared axes will have tick labels disabled.\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n # TODO this is a hack; make a proper figure spec object\n new._figsize = figsize # type: ignore\n\n if sharex is not None:\n new._subplot_spec[\"sharex\"] = sharex\n if sharey is not None:\n new._subplot_spec[\"sharey\"] = sharey\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n TODO\n \"\"\"\n # TODO Plot-specific themes using the seaborn theming system\n raise NotImplementedError()\n new = self._clone()\n return new\n\n # TODO decorate? 
(or similar, for various texts) alt names: label?\n\n def save(self, fname, **kwargs) -> Plot:\n \"\"\"\n Render the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n fname : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n Other keyword arguments are passed to :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n self.plot().save(fname, **kwargs)\n return self\n\n def plot(self, pyplot=False) -> Plotter:\n \"\"\"\n Compile the plot and return the :class:`Plotter` engine.\n\n \"\"\"\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot)\n\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n plotter._transform_coords(self, common, layers)\n\n plotter._compute_stats(self, layers)\n plotter._setup_scales(self, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n plotter._make_legend()\n\n # TODO this should be configurable\n if not plotter._figure.get_constrained_layout():\n plotter._figure.set_tight_layout(True)\n\n return plotter\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Render and display the plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 730, "name": "getvalue", "kind": "ref", "category": "function", "info": " data = buffer.getvalue()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 738, "name": "_extract_data", "kind": "def", "category": "function", "info": " def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n # TODO use context manager with theme that has been set\n # TODO (maybe wrap THIS function with context manager; would be cleaner)\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n figure_kws = {\"figsize\": getattr(p, \"_figsize\", None)} # TODO fix\n self._figure = subplots.init_figure(\n 
pair_spec, self.pyplot, figure_kws, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n label = next((name for name in names if name is not None), None)\n ax.set(**{f\"{axis}label\": label})\n\n # TODO there should be some override (in Plot.configure?) so that\n # tick labels can be shown on interior shared axes\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or axis in p._pair_spec and bool(p._pair_spec.get(\"wrap\"))\n or not p._pair_spec.get(\"cross\", True)\n )\n axis_obj.get_label().set_visible(show_axis_label)\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO title template should be configurable\n # ---- Also we want right-side titles for row facets in most cases?\n # ---- Or wrapped? That can get annoying too.\n # TODO should configure() accept a title= kwarg (for single subplot plots)?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"row\", \"col\"]:\n if sub[dim] is not None:\n name = common.names.get(dim) # TODO None = val looks bad\n title_parts.append(f\"{name} = {sub[dim]}\")\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n\n def _transform_coords(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n for var in p._variables:\n\n # Parse name to identify variable (x, y, xmin, etc.) 
and axis (x/y)\n # TODO should we have xmin0/xmin1 or x0min/x1min?\n m = re.match(r\"^(?P<prefix>(?P<axis>[x|y])\\d*).*\", var)\n\n if m is None:\n continue\n\n prefix = m[\"prefix\"]\n axis = m[\"axis\"]\n\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n # TODO basically copied from _setup_scales, and very clumsy\n layer_values = [common.frame.filter(cols)]\n for layer in layers:\n if layer[\"data\"].frame is None:\n for df in layer[\"data\"].frames.values():\n layer_values.append(df.filter(cols))\n else:\n layer_values.append(layer[\"data\"].frame.filter(cols))\n\n if layer_values:\n var_df = pd.concat(layer_values, ignore_index=True)\n else:\n var_df = pd.DataFrame(columns=cols)\n\n prop = Coordinate(axis)\n scale_spec = self._get_scale(p, prefix, prop, var_df[var])\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec\n cat_scale = isinstance(scale_spec, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n # Now loop through each subplot, deriving the relevant seed data to setup\n # the scale (so that axis units / categories are initialized properly)\n # And then scale the data in each layer.\n subplots = [view for view in self._subplots if view[axis] == prefix]\n\n # Setup the scale on all of the data and plug it into self._scales\n # We do this because by the time we do self._setup_scales, coordinate data\n # will have been converted to floats already, so scale inference fails\n self._scales[var] = scale_spec.setup(var_df[var], prop)\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal tranforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n transformed_data.append(pd.Series(dtype=float, index=index, name=var))\n\n for view in subplots:\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = var_df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(var_df, view)\n elif share_state in var_df:\n # Sharing within row/col is more complicated\n use_rows = var_df[share_state] == view[share_state]\n idx = var_df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = var_df.index\n\n seed_values = var_df.loc[idx, var]\n\n scale = scale_spec.setup(seed_values, prop, axis=axis_obj)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n 
new_series.loc[idx] = scale(layer_df.loc[idx, var])\n\n # TODO need decision about whether to do this or modify axis transform\n set_scale_obj(view[\"ax\"], axis, scale.matplotlib_scale)\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> ScaleSpec:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, ScaleSpec):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:\n\n # Identify all of the variables that will be used at some point in the plot\n variables = set()\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n variables.update(df.columns)\n else:\n variables.update(layer[\"data\"].frame.columns)\n\n for var in variables:\n\n if var in self._scales:\n # Scales for coordinate variables added in _transform_coords\n continue\n\n # Get the data all the distinct appearances of this variable.\n parts = []\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n parts.append(df.get(var))\n else:\n parts.append(layer[\"data\"].frame.get(var))\n var_values = pd.concat(\n parts, axis=0, join=\"inner\", ignore_index=True\n ).rename(var)\n\n # Determine whether this is an coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<prefix>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n axis = None\n else:\n var = m[\"prefix\"]\n axis = m[\"axis\"]\n\n prop = PROPERTIES.get(var if axis is None else axis, Property())\n scale_spec = self._get_scale(p, var, prop, var_values)\n\n # Initialize the data-dependent parameters of the scale\n # Note that this returns a copy and does not mutate the original\n # This dictionary is used by the semantic mappings\n if scale_spec is None:\n # TODO what is the cleanest way to implement identity scale?\n # We don't really need a ScaleSpec, and Identity() will be\n # 
overloaded anyway (but maybe a general Identity object\n # that can be used as Scale/Mark/Stat/Move?)\n # Note that this may not be the right spacer to use\n # (but that is only relevant for coordinates, where identity scale\n # doesn't make sense or is poorly defined, since we don't use pixels.)\n self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n else:\n scale = scale_spec.setup(var_values, prop)\n if isinstance(prop, Coordinate):\n # If we have a coordinate here, we didn't assign a scale for it\n # in _transform_coords, which means it was added during compute_stat\n # This allows downstream orientation inference to work properly.\n # But it feels a little hacky, so perhaps revisit.\n scale.scale_type = \"computed\"\n self._scales[var] = scale\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = 
self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 754, "name": "_setup_figure", "kind": "def", "category": "function", "info": " def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n # TODO use context manager with theme that has been set\n # TODO (maybe wrap THIS function with context manager; would be 
cleaner)\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n figure_kws = {\"figsize\": getattr(p, \"_figsize\", None)} # TODO fix\n self._figure = subplots.init_figure(\n pair_spec, self.pyplot, figure_kws, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n label = next((name for name in names if name is not None), None)\n ax.set(**{f\"{axis}label\": label})\n\n # TODO there should be some override (in Plot.configure?) so that\n # tick labels can be shown on interior shared axes\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or axis in p._pair_spec and bool(p._pair_spec.get(\"wrap\"))\n or not p._pair_spec.get(\"cross\", True)\n )\n axis_obj.get_label().set_visible(show_axis_label)\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO title template should be configurable\n # ---- Also we want right-side titles for row facets in most cases?\n # ---- Or wrapped? That can get annoying too.\n # TODO should configure() accept a title= kwarg (for single subplot plots)?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"row\", \"col\"]:\n if sub[dim] is not None:\n name = common.names.get(dim) # TODO None = val looks bad\n title_parts.append(f\"{name} = {sub[dim]}\")\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n\n def _transform_coords(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n for var in p._variables:\n\n # Parse name to identify variable (x, y, xmin, etc.) 
and axis (x/y)\n            # TODO should we have xmin0/xmin1 or x0min/x1min?\n            m = re.match(r\"^(?P<prefix>(?P<axis>[x|y])\\d*).*\", var)\n\n            if m is None:\n                continue\n\n            prefix = m[\"prefix\"]\n            axis = m[\"axis\"]\n\n            share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n\n            # Concatenate layers, using only the relevant coordinate and faceting vars.\n            # This is unnecessarily wasteful, as layer data will often be redundant.\n            # But figuring out the minimal amount we need is more complicated.\n            cols = [var, \"col\", \"row\"]\n            # TODO basically copied from _setup_scales, and very clumsy\n            layer_values = [common.frame.filter(cols)]\n            for layer in layers:\n                if layer[\"data\"].frame is None:\n                    for df in layer[\"data\"].frames.values():\n                        layer_values.append(df.filter(cols))\n                else:\n                    layer_values.append(layer[\"data\"].frame.filter(cols))\n\n            if layer_values:\n                var_df = pd.concat(layer_values, ignore_index=True)\n            else:\n                var_df = pd.DataFrame(columns=cols)\n\n            prop = Coordinate(axis)\n            scale_spec = self._get_scale(p, prefix, prop, var_df[var])\n\n            # Shared categorical axes are broken on matplotlib<3.4.0.\n            # https://github.com/matplotlib/matplotlib/pull/18308\n            # This only affects us when sharing *paired* axes. This is a novel/niche\n            # behavior, so we will raise rather than hack together a workaround.\n            if Version(mpl.__version__) < Version(\"3.4.0\"):\n                from seaborn._core.scales import Nominal\n                paired_axis = axis in p._pair_spec\n                cat_scale = isinstance(scale_spec, Nominal)\n                ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n                shared_axes = share_state not in [False, \"none\", ok_dim]\n                if paired_axis and cat_scale and shared_axes:\n                    err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n                    raise RuntimeError(err)\n\n            # Now loop through each subplot, deriving the relevant seed data to set up\n            # the scale (so that axis units / categories are initialized properly)\n            # And then scale the data in each layer.\n            subplots = [view for view in self._subplots if view[axis] == prefix]\n\n            # Set up the scale on all of the data and plug it into self._scales\n            # We do this because by the time we do self._setup_scales, coordinate data\n            # will have been converted to floats already, so scale inference fails\n            self._scales[var] = scale_spec.setup(var_df[var], prop)\n\n            # Set up an empty series to receive the transformed values.\n            # We need this to handle piecemeal transforms of categories -> floats.\n            transformed_data = []\n            for layer in layers:\n                index = layer[\"data\"].frame.index\n                transformed_data.append(pd.Series(dtype=float, index=index, name=var))\n\n            for view in subplots:\n                axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n\n                if share_state in [True, \"all\"]:\n                    # The all-shared case is easiest: every subplot sees all the data\n                    seed_values = var_df[var]\n                else:\n                    # Otherwise, we need to set up separate scales for different subplots\n                    if share_state in [False, \"none\"]:\n                        # Fully independent axes are also easy: use each subplot's data\n                        idx = self._get_subplot_index(var_df, view)\n                    elif share_state in var_df:\n                        # Sharing within row/col is more complicated\n                        use_rows = var_df[share_state] == view[share_state]\n                        idx = var_df.index[use_rows]\n                    else:\n                        # This configuration doesn't make much sense, but it's fine\n                        idx = var_df.index\n\n                    seed_values = var_df.loc[idx, var]\n\n                scale = scale_spec.setup(seed_values, prop, axis=axis_obj)\n\n                for layer, new_series in zip(layers, transformed_data):\n                    layer_df = layer[\"data\"].frame\n                    if var in layer_df:\n                        idx = self._get_subplot_index(layer_df, view)\n                        
new_series.loc[idx] = scale(layer_df.loc[idx, var])\n\n                # TODO need decision about whether to do this or modify axis transform\n                set_scale_obj(view[\"ax\"], axis, scale.matplotlib_scale)\n\n            # Now that the transformed data series are complete, update the layer data\n            for layer, new_series in zip(layers, transformed_data):\n                layer_df = layer[\"data\"].frame\n                if var in layer_df:\n                    layer_df[var] = new_series\n\n    def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n        grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n        grouping_vars += [\"col\", \"row\", \"group\"]\n\n        pair_vars = spec._pair_spec.get(\"structure\", {})\n\n        for layer in layers:\n\n            data = layer[\"data\"]\n            mark = layer[\"mark\"]\n            stat = layer[\"stat\"]\n\n            if stat is None:\n                continue\n\n            iter_axes = itertools.product(*[\n                pair_vars.get(axis, [axis]) for axis in \"xy\"\n            ])\n\n            old = data.frame\n\n            if pair_vars:\n                data.frames = {}\n                data.frame = data.frame.iloc[:0]  # TODO to simplify typing\n\n            for coord_vars in iter_axes:\n\n                pairings = \"xy\", coord_vars\n\n                df = old.copy()\n                scales = self._scales.copy()\n\n                for axis, var in zip(*pairings):\n                    if axis != var:\n                        df = df.rename(columns={var: axis})\n                        drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n                        df = df.drop(drop_cols, axis=1)\n                        scales[axis] = scales[var]\n\n                orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n                if stat.group_by_orient:\n                    grouper = [orient, *grouping_vars]\n                else:\n                    grouper = grouping_vars\n                groupby = GroupBy(grouper)\n                res = stat(df, groupby, orient, scales)\n\n                if pair_vars:\n                    data.frames[coord_vars] = res\n                else:\n                    data.frame = res\n\n    def _get_scale(\n        self, spec: Plot, var: str, prop: Property, values: Series\n    ) -> ScaleSpec:\n\n        if var in spec._scales:\n            arg = spec._scales[var]\n            if arg is None or isinstance(arg, ScaleSpec):\n                scale = arg\n            else:\n                scale = prop.infer_scale(arg, values)\n        else:\n            scale = prop.default_scale(values)\n\n        return scale\n\n    def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:\n\n        # Identify all of the variables that will be used at some point in the plot\n        variables = set()\n        for layer in layers:\n            if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n                for df in layer[\"data\"].frames.values():\n                    variables.update(df.columns)\n            else:\n                variables.update(layer[\"data\"].frame.columns)\n\n        for var in variables:\n\n            if var in self._scales:\n                # Scales for coordinate variables added in _transform_coords\n                continue\n\n            # Get the data for all the distinct appearances of this variable.\n            parts = []\n            for layer in layers:\n                if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n                    for df in layer[\"data\"].frames.values():\n                        parts.append(df.get(var))\n                else:\n                    parts.append(layer[\"data\"].frame.get(var))\n            var_values = pd.concat(\n                parts, axis=0, join=\"inner\", ignore_index=True\n            ).rename(var)\n\n            # Determine whether this is a coordinate variable\n            # (i.e., x/y, paired x/y, or derivative such as xmax)\n            m = re.match(r\"^(?P<prefix>(?P<axis>x|y)\\d*).*\", var)\n            if m is None:\n                axis = None\n            else:\n                var = m[\"prefix\"]\n                axis = m[\"axis\"]\n\n            prop = PROPERTIES.get(var if axis is None else axis, Property())\n            scale_spec = self._get_scale(p, var, prop, var_values)\n\n            # Initialize the data-dependent parameters of the scale\n            # Note that this returns a copy and does not mutate the original\n            # This dictionary is used by the semantic mappings\n            if scale_spec is None:\n                # TODO what is the cleanest way to implement identity scale?\n                # We don't really need a ScaleSpec, and Identity() will be\n                # 
overloaded anyway (but maybe a general Identity object\n # that can be used as Scale/Mark/Stat/Move?)\n # Note that this may not be the right spacer to use\n # (but that is only relevant for coordinates, where identity scale\n # doesn't make sense or is poorly defined, since we don't use pixels.)\n self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n else:\n scale = scale_spec.setup(var_values, prop)\n if isinstance(prop, Coordinate):\n # If we have a coordinate here, we didn't assign a scale for it\n # in _transform_coords, which means it was added during compute_stat\n # This allows downstream orientation inference to work properly.\n # But it feels a little hacky, so perhaps revisit.\n scale.scale_type = \"computed\"\n self._scales[var] = scale\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = 
self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 767, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(common.frame[dim])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 770, "name": "Subplots", "kind": "ref", "category": "function", "info": " self._subplots = 
subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 774, "name": "init_figure", "kind": "ref", "category": "function", "info": "        self._figure = subplots.init_figure(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 840, "name": "_transform_coords", "kind": "def", "category": "function", "info": "    def _transform_coords(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n        for var in p._variables:\n\n            # Parse name to identify variable (x, y, xmin, etc.) and axis (x/y)\n            # TODO should we have xmin0/xmin1 or x0min/x1min?\n            m = re.match(r\"^(?P<prefix>(?P<axis>[x|y])\\d*).*\", var)\n\n            if m is None:\n                continue\n\n            prefix = m[\"prefix\"]\n            axis = m[\"axis\"]\n\n            share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n\n            # Concatenate layers, using only the relevant coordinate and faceting vars.\n            # This is unnecessarily wasteful, as layer data will often be redundant.\n            # But figuring out the minimal amount we need is more complicated.\n            cols = [var, \"col\", \"row\"]\n            # TODO basically copied from _setup_scales, and very clumsy\n            layer_values = [common.frame.filter(cols)]\n            for layer in layers:\n                if layer[\"data\"].frame is None:\n                    for df in layer[\"data\"].frames.values():\n                        layer_values.append(df.filter(cols))\n                else:\n                    layer_values.append(layer[\"data\"].frame.filter(cols))\n\n            if layer_values:\n                var_df = pd.concat(layer_values, ignore_index=True)\n            else:\n                var_df = pd.DataFrame(columns=cols)\n\n            prop = Coordinate(axis)\n            scale_spec = self._get_scale(p, prefix, prop, var_df[var])\n\n            # Shared categorical axes are broken on matplotlib<3.4.0.\n            # https://github.com/matplotlib/matplotlib/pull/18308\n            # This only affects us when sharing *paired* axes. 
This is a novel/niche\n            # behavior, so we will raise rather than hack together a workaround.\n            if Version(mpl.__version__) < Version(\"3.4.0\"):\n                from seaborn._core.scales import Nominal\n                paired_axis = axis in p._pair_spec\n                cat_scale = isinstance(scale_spec, Nominal)\n                ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n                shared_axes = share_state not in [False, \"none\", ok_dim]\n                if paired_axis and cat_scale and shared_axes:\n                    err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n                    raise RuntimeError(err)\n\n            # Now loop through each subplot, deriving the relevant seed data to set up\n            # the scale (so that axis units / categories are initialized properly)\n            # And then scale the data in each layer.\n            subplots = [view for view in self._subplots if view[axis] == prefix]\n\n            # Set up the scale on all of the data and plug it into self._scales\n            # We do this because by the time we do self._setup_scales, coordinate data\n            # will have been converted to floats already, so scale inference fails\n            self._scales[var] = scale_spec.setup(var_df[var], prop)\n\n            # Set up an empty series to receive the transformed values.\n            # We need this to handle piecemeal transforms of categories -> floats.\n            transformed_data = []\n            for layer in layers:\n                index = layer[\"data\"].frame.index\n                transformed_data.append(pd.Series(dtype=float, index=index, name=var))\n\n            for view in subplots:\n                axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n\n                if share_state in [True, \"all\"]:\n                    # The all-shared case is easiest: every subplot sees all the data\n                    seed_values = var_df[var]\n                else:\n                    # Otherwise, we need to set up separate scales for different subplots\n                    if share_state in [False, \"none\"]:\n                        # Fully independent axes are also easy: use each subplot's data\n                        idx = self._get_subplot_index(var_df, view)\n                    elif share_state in var_df:\n                        # Sharing within row/col is more complicated\n                        use_rows = var_df[share_state] == view[share_state]\n                        idx = var_df.index[use_rows]\n                    else:\n                        # This configuration doesn't make much sense, but it's fine\n                        idx = var_df.index\n\n                    seed_values = var_df.loc[idx, var]\n\n                scale = scale_spec.setup(seed_values, prop, axis=axis_obj)\n\n                for layer, new_series in zip(layers, transformed_data):\n                    layer_df = layer[\"data\"].frame\n                    if var in layer_df:\n                        idx = self._get_subplot_index(layer_df, view)\n                        new_series.loc[idx] = scale(layer_df.loc[idx, var])\n\n                # TODO need decision about whether to do this or modify axis transform\n                set_scale_obj(view[\"ax\"], axis, scale.matplotlib_scale)\n\n            # Now that the transformed data series are complete, update the layer data\n            for layer, new_series in zip(layers, transformed_data):\n                layer_df = layer[\"data\"].frame\n                if var in layer_df:\n                    layer_df[var] = new_series\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 846, "name": "match", "kind": "ref", "category": "function", "info": "            m = re.match(r\"^(?P<prefix>(?P<axis>[x|y])\\d*).*\", var)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 874, "name": "Coordinate", "kind": "ref", "category": "function", "info": "            prop = Coordinate(axis)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 875, "name": "_get_scale", "kind": "ref", "category": "function", "info": "            scale_spec = self._get_scale(p, prefix, prop, var_df[var])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 881, "name": "Version", "kind": "ref", "category": "function", "info": "            if Version(mpl.__version__) < Version(\"3.4.0\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 881, "name": "Version", "kind": "ref", "category": "function", "info": "            if Version(mpl.__version__) < Version(\"3.4.0\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 899, "name": "setup", "kind": "ref", "category": "function", "info": "            self._scales[var] = scale_spec.setup(var_df[var], prop)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 918, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": "                        idx = self._get_subplot_index(var_df, view)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 929, "name": "setup", "kind": "ref", "category": "function", "info": "                scale = scale_spec.setup(seed_values, prop, axis=axis_obj)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 934, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": "                        idx = self._get_subplot_index(layer_df, view)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 935, "name": "scale", "kind": "ref", "category": "function", "info": "                        new_series.loc[idx] = scale(layer_df.loc[idx, var])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": 
"seaborn/_core/plot.py", "line": 938, "name": "set_scale_obj", "kind": "ref", "category": "function", "info": " set_scale_obj(view[\"ax\"], axis, scale.matplotlib_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 946, "name": "_compute_stats", "kind": "def", "category": "function", "info": " def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> ScaleSpec:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, ScaleSpec):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:\n\n # Identify all of the variables that will be used at some point in the plot\n variables = set()\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n variables.update(df.columns)\n else:\n variables.update(layer[\"data\"].frame.columns)\n\n for var in variables:\n\n if var in self._scales:\n # Scales for coordinate variables added in _transform_coords\n continue\n\n # Get the data all the distinct appearances of this variable.\n parts = []\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n parts.append(df.get(var))\n else:\n parts.append(layer[\"data\"].frame.get(var))\n var_values = pd.concat(\n parts, axis=0, join=\"inner\", ignore_index=True\n ).rename(var)\n\n # Determine whether this is an coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n axis = None\n else:\n var = m[\"prefix\"]\n axis = m[\"axis\"]\n\n prop = PROPERTIES.get(var if axis is None else axis, Property())\n scale_spec = self._get_scale(p, var, prop, var_values)\n\n # Initialize the data-dependent parameters of the scale\n # Note that this returns a copy and does not mutate the original\n # This dictionary is used by the semantic mappings\n if scale_spec is None:\n # TODO what is the cleanest way to implement identity scale?\n # We don't really need a ScaleSpec, and Identity() will be\n # overloaded anyway 
(but maybe a general Identity object\n # that can be used as Scale/Mark/Stat/Move?)\n # Note that this may not be the right spacer to use\n # (but that is only relevant for coordinates, where identity scale\n # doesn't make sense or is poorly defined, since we don't use pixels.)\n self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n else:\n scale = scale_spec.setup(var_values, prop)\n if isinstance(prop, Coordinate):\n # If we have a coordinate here, we didn't assign a scale for it\n # in _transform_coords, which means it was added during compute_stat\n # This allows downstream orientation inference to work properly.\n # But it feels a little hacky, so perhaps revisit.\n scale.scale_type = \"computed\"\n self._scales[var] = scale\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = 
self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 982, "name": "match", "kind": "ref", "category": "function", "info": " drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 986, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = 
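The split generator above iterates the full product of grouping levels and tolerates missing groups so that empty subsets can (optionally) still be yielded. A self-contained sketch of that pattern, including the singleton-tuple workaround the comments mention (toy data, not seaborn internals):

import itertools

import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3, 4], "hue": ["a", "a", "b", "b"]})
grouping_vars = ["hue"]
grouping_keys = [["a", "b", "c"]]  # level "c" has no rows

grouped = df.groupby(grouping_vars, sort=False)
for key in itertools.product(*grouping_keys):
    # pandas get_group wants a scalar when there is a single grouper
    pd_key = key[0] if len(key) == 1 else key
    try:
        subset = grouped.get_group(pd_key)
    except KeyError:
        subset = df.loc[[]]  # empty frame with the same columns
    print(dict(zip(grouping_vars, key)), len(subset))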
layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 992, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(grouper)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1000, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> ScaleSpec:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, ScaleSpec):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:\n\n # Identify all of the variables that will be used at some point in the plot\n variables = set()\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n variables.update(df.columns)\n else:\n variables.update(layer[\"data\"].frame.columns)\n\n for var in variables:\n\n if var in self._scales:\n # Scales for coordinate variables added in _transform_coords\n continue\n\n # Get the data all the distinct appearances of this variable.\n parts = []\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n parts.append(df.get(var))\n else:\n parts.append(layer[\"data\"].frame.get(var))\n var_values = pd.concat(\n parts, axis=0, join=\"inner\", ignore_index=True\n ).rename(var)\n\n # Determine whether this is an coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n axis = None\n else:\n var = m[\"prefix\"]\n axis = m[\"axis\"]\n\n prop = PROPERTIES.get(var if axis is None else axis, Property())\n scale_spec = self._get_scale(p, var, prop, var_values)\n\n # Initialize the data-dependent parameters of the scale\n # Note that this returns a copy and does not mutate the original\n # This dictionary is used by the semantic mappings\n if scale_spec is None:\n # TODO what is the cleanest way to implement identity scale?\n # We don't really need a ScaleSpec, and Identity() will be\n # overloaded anyway (but maybe a general Identity object\n # that can be used as Scale/Mark/Stat/Move?)\n # Note that this may not be the right spacer to use\n # (but that is only relevant for coordinates, where identity scale\n # doesn't make sense or is poorly defined, since we don't use pixels.)\n self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n else:\n scale = scale_spec.setup(var_values, prop)\n if isinstance(prop, Coordinate):\n # If we have a coordinate here, we didn't assign a scale for it\n # in _transform_coords, which means it was added during compute_stat\n # This allows downstream orientation inference to work properly.\n # But it feels a little hacky, so perhaps revisit.\n scale.scale_type = \"computed\"\n self._scales[var] = scale\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for 
subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted 
= transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += 
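`_make_legend` merges handles for the same variable across layers into tuples because matplotlib's legend machinery draws a tuple of artists overlaid as a single entry (its default handler map routes tuples through HandlerTuple). A minimal demonstration, independent of seaborn:

import matplotlib.pyplot as plt
from matplotlib.lines import Line2D

fig, ax = plt.subplots()
marker = Line2D([], [], color="C0", marker="o", linestyle="")
line = Line2D([], [], color="C0")

# A tuple of handles produces one overlaid legend entry
ax.legend(handles=[(marker, line)], labels=["layered"])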
artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1009, "name": "infer_scale", "kind": "ref", "category": "function", "info": " scale = prop.infer_scale(arg, values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1011, "name": "default_scale", "kind": "ref", "category": "function", "info": " scale = prop.default_scale(values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1015, "name": "_setup_scales", "kind": "def", "category": "function", "info": " def _setup_scales(self, p: Plot, layers: list[Layer]) -> None:\n\n # Identify all of the variables that will be used at some point in the plot\n variables = set()\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n variables.update(df.columns)\n else:\n variables.update(layer[\"data\"].frame.columns)\n\n for var in variables:\n\n if var in self._scales:\n # Scales for coordinate variables added in _transform_coords\n continue\n\n # Get the data all the distinct appearances of this variable.\n parts = []\n for layer in layers:\n if layer[\"data\"].frame.empty and layer[\"data\"].frames:\n for df in layer[\"data\"].frames.values():\n parts.append(df.get(var))\n else:\n parts.append(layer[\"data\"].frame.get(var))\n var_values = pd.concat(\n parts, axis=0, join=\"inner\", ignore_index=True\n ).rename(var)\n\n # Determine whether this is an coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n axis = None\n else:\n var = m[\"prefix\"]\n axis = m[\"axis\"]\n\n prop = PROPERTIES.get(var if axis is None else axis, Property())\n scale_spec = self._get_scale(p, var, prop, var_values)\n\n # Initialize the data-dependent parameters of the scale\n # Note that this returns a copy and does not mutate the original\n # This dictionary is used by the semantic mappings\n if scale_spec is None:\n # TODO what is the cleanest way to implement identity scale?\n # We don't really need a ScaleSpec, and Identity() will be\n # overloaded anyway (but maybe a general Identity object\n # that can be used as Scale/Mark/Stat/Move?)\n # Note that this may not be the right spacer to use\n # (but that is only relevant for coordinates, where identity scale\n # doesn't make sense or is poorly defined, since we don't use pixels.)\n self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n else:\n scale = scale_spec.setup(var_values, prop)\n if isinstance(prop, Coordinate):\n # 
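The pattern r"^(?P(?Px|y)\d*).*" in `_setup_scales` above has clearly lost its group names (the angle-bracketed parts look like they were stripped as HTML tags); since the match is read back as m["prefix"] and m["axis"], the original was almost certainly r"^(?P<prefix>(?P<axis>x|y)\d*).*". A quick check of the reconstructed pattern:

import re

pattern = r"^(?P<prefix>(?P<axis>x|y)\d*).*"

m = re.match(pattern, "ymin")   # derived coordinate like ymin -> axis "y"
print(m["prefix"], m["axis"])   # y y
m = re.match(pattern, "x1max")  # paired variable x1 keeps its suffix
print(m["prefix"], m["axis"])   # x1 x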
If we have a coordinate here, we didn't assign a scale for it\n # in _transform_coords, which means it was added during compute_stat\n # This allows downstream orientation inference to work properly.\n # But it feels a little hacky, so perhaps revisit.\n scale.scale_type = \"computed\"\n self._scales[var] = scale\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO 
do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
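Both `_scale_coords` and the split generator lean on pd.option_context("mode.use_inf_as_null", True) so that dropna also removes infinite values; that option name is deprecated in pandas in favor of use_inf_as_na, which has itself been deprecated in recent releases, so an explicit replacement is the more durable spelling. A sketch of the option-free equivalent:

import numpy as np
import pandas as pd

df = pd.DataFrame({"x": [1.0, np.inf, 3.0], "y": [4.0, 5.0, -np.inf]})

# Same effect as dropna() under use_inf_as_na, without a global option
cleaned = df.replace([np.inf, -np.inf], np.nan).dropna()
print(cleaned)  # only the first row survives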
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1046, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1053, "name": "Property", "kind": "ref", "category": "function", "info": " prop = PROPERTIES.get(var if axis is 
None else axis, Property())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1054, "name": "_get_scale", "kind": "ref", "category": "function", "info": " scale_spec = self._get_scale(p, var, prop, var_values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1067, "name": "Scale", "kind": "ref", "category": "function", "info": " self._scales[var] = Scale([], lambda x: x, None, \"identity\", None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1069, "name": "setup", "kind": "ref", "category": "function", "info": " scale = scale_spec.setup(var_values, prop)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1078, "name": "_plot_layer", "kind": "def", "category": "function", "info": " def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = 
self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
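`_filter_subplot_data` (and `_get_subplot_index`, which duplicates the logic, as its TODO admits) reduces to a conjunction of boolean masks over whatever facet columns exist. Standalone, with toy faceting data:

import pandas as pd

df = pd.DataFrame({"x": range(4), "col": ["a", "a", "b", "b"]})
subplot = {"col": "a", "row": None}  # one facet cell

dims = df.columns.intersection(["col", "row"])
keep_rows = pd.Series(True, index=df.index, dtype=bool)
for dim in dims:
    keep_rows &= df[dim] == subplot[dim]
print(df[keep_rows])  # rows where col == "a"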
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1089, "name": "_generate_pairings", "kind": "ref", "category": "function", "info": " for subplots, df, scales in self._generate_pairings(data, pair_variables):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1091, "name": "_infer_orient", "kind": "ref", "category": 
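The first pass of `_update_legend_contents` uses Python's for/else to either attach a variable to an existing schema entry (when two semantics share one data id) or append a fresh entry. The pattern in miniature, with hypothetical variable/id pairs:

schema = []  # entries: ((name, data_id), [variables], payload)
entries = [("hue", "id1"), ("style", "id1"), ("size", "id2")]

for var, data_id in entries:
    for (_, part_id), part_vars, _ in schema:
        if data_id == part_id:
            part_vars.append(var)  # same underlying data: merge entries
            break
    else:  # no break: first time this data id appears
        schema.append(((var, data_id), [var], None))

print(schema)
# [(('hue', 'id1'), ['hue', 'style'], None), (('size', 'id2'), ['size'], None)]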
"function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1093, "name": "get_order", "kind": "def", "category": "function", "info": " def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return scales[var].order\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient].spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(\n grouping_vars, df, subplots\n )\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = 
getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += 
artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1102, "name": "_resolve", "kind": "ref", "category": "function", "info": " width = mark._resolve(df, \"width\", None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1106, "name": "spacing", "kind": "ref", "category": "function", "info": " df[\"width\"] = width * scales[orient].spacing(df[orient])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1113, "name": "_resolve", "kind": "ref", "category": "function", "info": " baseline = mark._resolve(df, \"baseline\", None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1130, "name": "get_order", "kind": "ref", "category": "function", "info": " order = {var: get_order(var) for var in move_groupers}\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1131, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1132, "name": "move_step", "kind": "ref", "category": "function", "info": " df = move_step(df, groupby, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1134, "name": "_unscale_coords", "kind": "ref", "category": "function", "info": " df = self._unscale_coords(subplots, df, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1137, "name": "_setup_split_generator", "kind": "ref", "category": "function", "info": " split_generator = self._setup_split_generator(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1141, "name": "_plot", "kind": "ref", "category": "function", "info": " mark._plot(split_generator, scales, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1148, "name": "_update_legend_contents", "kind": "ref", "category": "function", "info": " self._update_legend_contents(mark, data, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": 
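One observation on the merge loop in `_make_legend` above: in the tuple branch, artist += artist[i], extends the tuple with one of its own members rather than with the new layer's handle, and the rebinding affects only the loop variable, never `existing_artists`. The surrounding code suggests the intent was closer to the following (my reconstruction, not the library's):

from matplotlib.lines import Line2D

existing_artists = [Line2D([], [], color="C0")]          # from earlier layers
artists = [Line2D([], [], color="C0", marker="o", linestyle="")]  # new layer

for i, artist in enumerate(existing_artists):
    if isinstance(artist, tuple):
        # Extend the stored tuple with the new layer's handle
        existing_artists[i] = artist + (artists[i],)
    else:
        existing_artists[i] = (artist, artists[i])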
"seaborn/_core/plot.py", "line": 1150, "name": "_scale_coords", "kind": "def", "category": "function", "info": " def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1153, "name": "match", "kind": "ref", "category": "function", "info": " coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1162, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " 
view_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1168, "name": "scale", "kind": "ref", "category": "function", "info": " out_df.loc[values.index, var] = scale(values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1172, "name": "_unscale_coords", "kind": "def", "category": "function", "info": " def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1176, "name": "match", "kind": "ref", "category": "function", "info": " coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1186, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " 
view_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1192, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = axis.get_transform().inverted().transform\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1204, "name": "_generate_pairings", "kind": "def", "category": "function", "info": " def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += 
artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1241, "name": "match", "kind": "ref", "category": "function", "info": " cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1246, "name": "_get_subplot_index", "kind": "def", "category": "function", "info": " def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += 
artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1257, "name": "_filter_subplot_data", "kind": "def", "category": "function", "info": " def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1268, "name": "_setup_split_generator", "kind": "def", "category": "function", "info": " def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in 
grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = self._scales[var].order\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1281, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(df[var])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1284, "name": "split_generator", "kind": "def", "category": "function", "info": " def 
split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n axes_df = axes_df.assign(\n x=axes_df[\"x\"].where(present),\n y=axes_df[\"y\"].where(present),\n )\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we 
need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1288, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " axes_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1322, "name": "get_group", "kind": "ref", "category": "function", "info": " df_subset = grouped_df.get_group(pd_key)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1342, "name": "_update_legend_contents", "kind": "def", "category": "function", "info": " def _update_legend_contents(\n self, mark: Mark, data: PlotData, scales: dict[str, Scale]\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str | None, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var].legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n entry = (data.names[var], data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | 
None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1376, "name": "_legend_artist", "kind": "ref", "category": "function", "info": " artists.append(mark._legend_artist(variables, val, scales))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1381, "name": "_make_legend", "kind": "def", "category": "function", "info": " def _make_legend(self) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str | None, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name, # TODO don't show \"None\" as title\n loc=\"center left\",\n bbox_to_anchor=(.98, .55),\n )\n\n # TODO: This is an illegal hack accessing private attributes on the legend\n # We need to sort out how we are going to handle this given that lack of a\n # proper API to do things like position legends relative to each other\n if base_legend:\n base_legend._legend_box._children.extend(legend._legend_box._children)\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/plot.py", 
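The _make_legend definition indexed here merges handles across layers by packing each variable's artists into tuples, relying on matplotlib's tuple handler to overlay them in a single legend entry. A minimal, self-contained illustration of that matplotlib behavior (hypothetical artists, independent of seaborn's internals):

    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D

    # Two artists standing in for the same variable drawn by two layers
    line = Line2D([], [], color="C0")
    dots = Line2D([], [], color="C0", marker="o", linestyle="")

    fig, ax = plt.subplots()
    # A tuple handle is rendered by HandlerTuple: both artists are overlaid
    ax.legend(handles=[(line, dots)], labels=["layer 1 + layer 2"])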
"rel_fname": "seaborn/_core/plot.py", "line": 1407, "name": "Legend", "kind": "ref", "category": "function", "info": " legend = mpl.legend.Legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 46, "name": "Property", "kind": "def", "category": "class", "info": "__init__\tdefault_scale\tinfer_scale\tget_mapping\tstandardize\t_check_dict_entries\t_check_list_length"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 61, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> ScaleSpec:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n # TODO allow variable_type to be \"boolean\" if that's a scale?\n # TODO how will this handle data with units that can be treated as numeric\n # if passed through a registered matplotlib converter?\n var_type = variable_type(data, boolean_type=\"numeric\")\n if var_type == \"numeric\":\n return Continuous()\n elif var_type == \"datetime\":\n return Temporal()\n # TODO others\n # time-based (TimeStamp, TimeDelta, Period)\n # boolean scale?\n else:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? 
That should happen centrally somewhere\n return Continuous(transform=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 66, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 68, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 70, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 75, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 77, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. 
for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(transform=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 88, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(transform=arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 97, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values 
({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 105, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 109, "name": "_check_dict_entries", "kind": "def", "category": "function", "info": " def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 117, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 147, "name": "Coordinate", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 158, "name": "IntervalProperty", "kind": "def", "category": "class", "info": "default_range\t_forward\t_inverse\tinfer_scale\tget_mapping\t_get_categorical_mapping"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 166, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return 
self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 170, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n 
else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 174, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 178, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(transform=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 184, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 185, "name": "variable_type", "kind": "ref", "category": "function", "info": " elif variable_type(data) == \"categorical\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 186, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 187, "name": "variable_type", "kind": "ref", "category": "function", "info": " elif variable_type(data) == \"datetime\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 188, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 191, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 193, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 198, "name": "_get_categorical_mapping", "kind": "ref", "category": "function", "info": " return self._get_categorical_mapping(scale, data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 201, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(self.default_range)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 203, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(scale.values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 216, "name": "mapping", "kind": "def", "category": 
"function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 217, "name": "_inverse", "kind": "ref", "category": "function", "info": " return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 221, "name": "_get_categorical_mapping", "kind": "def", "category": "function", "info": " def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 225, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 228, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 231, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 245, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward([vmin, vmax])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 246, "name": "_inverse", "kind": "ref", "category": "function", "info": " values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 248, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 258, "name": "PointSize", "kind": "def", "category": "class", "info": "_forward\t_inverse"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 265, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values):\n \"\"\"Square native values to implement linear scaling of point area.\"\"\"\n return np.square(values)\n\n def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 269, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 274, "name": "LineWidth", "kind": "def", 
"category": "class", "info": "default_range"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 277, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 283, "name": "EdgeWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", 
"rel_fname": "seaborn/_core/properties.py", "line": 286, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 292, "name": "Stroke", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 297, "name": "Alpha", "kind": "def", "category": "class", "info": ""}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 308, "name": "ObjectProperty", "kind": "def", "category": "class", "info": "_default_values\tdefault_scale\tinfer_scale\tget_mapping"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 317, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 320, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 321, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 323, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: 
Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 324, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 326, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 331, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 335, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, 
scale.values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 338, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 340, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(n)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 348, "name": "standardize", "kind": "ref", "category": "function", "info": " values = [self.standardize(x) for x in values]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 350, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 360, "name": "Marker", "kind": "def", "category": "class", "info": "standardize\t_default_values"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 362, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " null_value = MarkerStyle(\"\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 369, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: MarkerPattern) -> MarkerStyle:\n return MarkerStyle(val)\n\n def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well 
distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 370, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return MarkerStyle(val)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 372, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 399, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " markers = [MarkerStyle(m) for m in markers[:n]]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 404, "name": "LineStyle", "kind": "def", "category": "class", "info": "standardize\t_default_values\t_get_dash_pattern"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 408, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: str | DashPattern) -> DashPatternWithOffset:\n return self._get_dash_pattern(val)\n\n def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 409, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return self._get_dash_pattern(val)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 411, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 452, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return [self._get_dash_pattern(x) for x in dashes]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 455, "name": "_get_dash_pattern", "kind": "def", "category": "function", "info": " def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), 
*ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 506, "name": "Color", "kind": "def", "category": "class", "info": "standardize\t_standardize_color_sequence\tinfer_scale\t_get_categorical_mapping\tget_mapping"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 511, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: ColorSpec) -> RGBTuple | RGBATuple:\n # Return color with alpha channel only if the input spec has it\n # This is so that RGBA colors can override the Alpha property\n if to_rgba(val) != to_rgba(val, 1):\n return to_rgba(val)\n else:\n return to_rgb(val)\n\n def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. 
datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 
519, "name": "_standardize_color_sequence", "kind": "def", "category": "function", "info": " def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return 
self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 521, "name": "has_alpha", "kind": "def", "category": "function", "info": " def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. 
datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 
527, "name": "has_alpha", "kind": "ref", "category": "function", "info": " needs_alpha = any(has_alpha(x) for x in colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 534, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(transform=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 539, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"categorical\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 542, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 549, 
"name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 550, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 553, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 569, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 571, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 574, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 576, "name": "_get_categorical_mapping", "kind": "def", "category": "function", "info": " def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO 
blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 578, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 583, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 587, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 589, "name": "blend_palette", "kind": "ref", "category": "function", "info": " colors = blend_palette(values, n)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 591, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(values, n)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 593, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n <= len(get_color_cycle()):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 595, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 597, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 607, "name": "_standardize_color_sequence", "kind": "ref", "category": "function", "info": " colors = self._standardize_color_sequence(colors)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 609, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 618, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 625, "name": "_get_categorical_mapping", "kind": "ref", "category": "function", "info": " return self._get_categorical_mapping(scale, data)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 629, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(\"ch:\", as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 633, "name": "blend_palette", "kind": "ref", "category": "function", "info": " mapping = blend_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 638, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 649, "name": "_mapping", "kind": "def", "category": "function", "info": " def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 653, "name": "mapping", "kind": "ref", "category": "function", "info": " out = mapping(x)[:, :3]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 665, "name": "Fill", "kind": "def", "category": "class", "info": "standardize\t_default_values\tdefault_scale\tinfer_scale\tget_mapping"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 674, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> bool:\n return bool(val)\n\n def _default_values(self, n: int) -> list:\n \"\"\"Return a list of n values, alternating True and False.\"\"\"\n if n > 2:\n msg = \" \".join([\n f\"The variable assigned to {self.variable} has more than two levels,\",\n f\"so {self.variable} values will cycle and may be uninterpretable\",\n ])\n # TODO fire in a \"nice\" way (see above)\n warnings.warn(msg, UserWarning)\n return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]\n\n def default_scale(self, data: Series) -> Nominal:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO infer Boolean where possible?\n return Nominal(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps each data value to True or False.\"\"\"\n # TODO categorical_order is going to return [False, True] for booleans,\n # and [0, 1] for binary, but the default values order is [True, False].\n # We should special case this to handle it properly, or change\n # categorical_order to not \"sort\" booleans. 
Note that we need to sync with\n # what's going to happen upstream in the scale, so we can't just do it here.\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n\n if isinstance(scale.values, list):\n values = [bool(x) for x in scale.values]\n elif isinstance(scale.values, dict):\n values = [bool(scale.values[x]) for x in levels]\n elif scale.values is None:\n values = self._default_values(len(levels))\n else:\n msg = \" \".join([\n f\"Scale values for {self.variable} must be passed in\",\n f\"a list or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else False\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 677, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 688, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 690, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 692, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> ScaleSpec:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(transform=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 695, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 697, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: ScaleSpec, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a 
function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 707, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 714, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(len(levels))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 722, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", 
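The interval mapping recorded just above spaces level values evenly between `vmax` and `vmin` (descending, so the first level receives the largest value), passing the endpoints through the property's `_forward`/`_inverse` transform pair. A sketch with identity transforms; in seaborn the pair can be nonlinear, e.g. for point sizes:

    import numpy as np

    def interval_values(levels, vmin, vmax, forward=np.asarray, inverse=np.asarray):
        lo, hi = forward([vmin, vmax])
        # Note the descending linspace: the first level maps to vmax
        return inverse(np.linspace(hi, lo, len(levels)))

    print(interval_values(["s", "m", "l"], 2, 8))  # [8. 5. 2.]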
"line": 765, "name": "cls", "kind": "ref", "category": "function", "info": "PROPERTIES = {var: cls(var) for var, cls in PROPERTY_CLASSES.items()}\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 16, "name": "VarType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 37, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(\n vector: Series,\n boolean_type: Literal[\"numeric\", \"categorical\"] = \"numeric\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 65, "name": "is_categorical_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_categorical_dtype(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 66, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 70, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 86, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(boolean_type)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 89, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 90, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 92, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 93, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 99, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VarType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 105, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 106, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 110, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 116, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 117, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 121, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 124, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector: Series, order: list | None = None) -> list:\n \"\"\"\n Return a list of unique data values using seaborn's ordering rules.\n\n Parameters\n ----------\n vector : Series\n Vector of \"categorical\" values\n order : list\n Desired order of category levels to override the order determined\n from the `data` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is not None:\n return order\n\n if vector.dtype.name == \"category\":\n order = list(vector.cat.categories)\n else:\n order = list(filter(pd.notnull, vector.unique()))\n if variable_type(order) == \"numeric\":\n order.sort()\n\n return order\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 149, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(order) == \"numeric\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 47, "name": "Scale", "kind": "def", "category": "class", "info": "__init__\t__call__\t_apply_pipeline\tspacing\tinvert_axis_transform\tset_default_locators_and_formatters"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 69, "name": "_apply_pipeline", "kind": "ref", "category": "function", "info": " return self._apply_pipeline(data, self.forward_pipe)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 73, "name": "_apply_pipeline", "kind": "def", "category": "function", "info": " def _apply_pipeline(\n self, data: ArrayLike, pipeline: Pipeline,\n ) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n def spacing(self, data: Series) -> float:\n return self.spacer(data)\n\n def invert_axis_transform(self, x):\n # TODO we may no longer need this method as we use the axis\n # transform directly in Plotter._unscale_coords\n finv = self.matplotlib_scale.get_transform().inverted().transform\n out = finv(x)\n if isinstance(x, pd.Series):\n return pd.Series(out, index=x.index, name=x.name)\n return out\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 85, "name": "func", "kind": "ref", "category": "function", "info": " data = func(data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 93, "name": "spacer", "kind": "ref", "category": "function", "info": " return self.spacer(data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 95, "name": "invert_axis_transform", "kind": "def", "category": "function", "info": " def invert_axis_transform(self, x):\n # TODO we may no longer need this method as we use the axis\n # transform directly in Plotter._unscale_coords\n finv = self.matplotlib_scale.get_transform().inverted().transform\n out = finv(x)\n if isinstance(x, pd.Series):\n return pd.Series(out, index=x.index, name=x.name)\n return out\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 98, "name": "inverted", "kind": "ref", "category": "function", "info": " finv = self.matplotlib_scale.get_transform().inverted().transform\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 99, "name": "finv", "kind": "ref", "category": "function", "info": " out = finv(x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 106, "name": "ScaleSpec", "kind": "def", "category": "class", "info": "__post_init__\ttick\tformat\tsetup\t_get_scale"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 114, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n # TODO do we need anything else here?\n self.tick()\n self.format()\n\n def tick(self):\n # TODO what is the right base method?\n self._major_locator: Locator\n self._minor_locator: Locator\n return self\n\n def format(self):\n self._major_formatter: Formatter\n return self\n\n def setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n ...\n\n # TODO typing\n def _get_scale(self, name, forward, inverse):\n\n major_locator = 
self._major_locator\n minor_locator = self._minor_locator\n\n # TODO hack, need to add default to Continuous\n major_formatter = getattr(self, \"_major_formatter\", ScalarFormatter())\n # major_formatter = self._major_formatter\n\n class Scale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return Scale(name, (forward, inverse))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 117, "name": "tick", "kind": "ref", "category": "function", "info": " self.tick()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 120, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self):\n # TODO what is the right base method?\n self._major_locator: Locator\n self._minor_locator: Locator\n return self\n\n def format(self):\n self._major_formatter: Formatter\n return self\n\n def setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n ...\n\n # TODO typing\n def _get_scale(self, name, forward, inverse):\n\n major_locator = self._major_locator\n minor_locator = self._minor_locator\n\n # TODO hack, need to add default to Continuous\n major_formatter = getattr(self, \"_major_formatter\", ScalarFormatter())\n # major_formatter = self._major_formatter\n\n class Scale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return Scale(name, (forward, inverse))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 130, "name": "setup", "kind": "def", "category": "function", "info": " def setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n ...\n\n # TODO typing\n def _get_scale(self, name, forward, inverse):\n\n major_locator = self._major_locator\n minor_locator = self._minor_locator\n\n # TODO hack, need to add default to Continuous\n major_formatter = getattr(self, \"_major_formatter\", ScalarFormatter())\n # major_formatter = self._major_formatter\n\n class Scale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return Scale(name, (forward, inverse))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 136, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(self, name, forward, inverse):\n\n major_locator = self._major_locator\n minor_locator = self._minor_locator\n\n # TODO hack, need to add default to Continuous\n major_formatter = getattr(self, \"_major_formatter\", ScalarFormatter())\n # major_formatter = self._major_formatter\n\n class Scale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n 
axis.set_major_formatter(major_formatter)\n\n return Scale(name, (forward, inverse))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 145, "name": "Scale", "kind": "def", "category": "class", "info": "__init__\t__call__\t_apply_pipeline\tspacing\tinvert_axis_transform\tset_default_locators_and_formatters"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 152, "name": "Scale", "kind": "ref", "category": "function", "info": " return Scale(name, (forward, inverse))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 156, "name": "Nominal", "kind": "def", "category": "class", "info": "setup"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 164, "name": "setup", "kind": "def", "category": "function", "info": " def setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n ...\n\n # TODO typing\n def _get_scale(self, name, forward, inverse):\n\n major_locator = self._major_locator\n minor_locator = self._minor_locator\n\n # TODO hack, need to add default to Continuous\n major_formatter = getattr(self, \"_major_formatter\", ScalarFormatter())\n # major_formatter = self._major_formatter\n\n class Scale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return Scale(name, (forward, inverse))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 168, "name": "CatScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 178, "name": "categorical_order", "kind": "ref", "category": "function", "info": " units_seed = categorical_order(data, self.order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 180, "name": "CatScale", "kind": "ref", "category": "function", "info": " mpl_scale = CatScale(data.name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 182, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 192, "name": "stringify", "kind": "ref", "category": "function", "info": " axis.update_units(stringify(np.array(units_seed)))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 203, "name": "stringify", "kind": "ref", "category": "function", "info": " out[keep] = axis.convert_units(stringify(x[keep]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 208, "name": "get_mapping", 
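
The repeated `_get_scale` entries show the pattern of subclassing matplotlib's `FuncScale` so that default locators and formatters travel with the scale object. A sketch of that idea against plain matplotlib, assuming a log10 forward/inverse pair (the `make_scale` name is illustrative):

import matplotlib as mpl
import numpy as np
from matplotlib.ticker import LogLocator, ScalarFormatter

def make_scale(name, forward, inverse, major_locator):
    class Scale(mpl.scale.FuncScale):
        def set_default_locators_and_formatters(self, axis):
            # Attach defaults here so any Axes adopting this scale gets them.
            axis.set_major_locator(major_locator)
            axis.set_major_formatter(ScalarFormatter())
    return Scale(name, (forward, inverse))

scale = make_scale("x", np.log10, lambda x: np.power(10.0, x), LogLocator(10))
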
"kind": "ref", "category": "function", "info": " prop.get_mapping(self, data),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 212, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n return 1\n\n if prop.legend:\n legend = units_seed, list(stringify(units_seed))\n else:\n legend = None\n\n scale_type = self.__class__.__name__.lower()\n scale = Scale(forward_pipe, spacer, legend, scale_type, mpl_scale)\n return scale\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 216, "name": "stringify", "kind": "ref", "category": "function", "info": " legend = units_seed, list(stringify(units_seed))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 221, "name": "Scale", "kind": "ref", "category": "function", "info": " scale = Scale(forward_pipe, spacer, legend, scale_type, mpl_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 226, "name": "Ordinal", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 232, "name": "Discrete", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 238, "name": "ContinuousBase", "kind": "def", "category": "class", "info": "setup\t_get_transform"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 243, "name": "setup", "kind": "def", "category": "function", "info": " def setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n ...\n\n # TODO typing\n def _get_scale(self, name, forward, inverse):\n\n major_locator = self._major_locator\n minor_locator = self._minor_locator\n\n # TODO hack, need to add default to Continuous\n major_formatter = getattr(self, \"_major_formatter\", ScalarFormatter())\n # major_formatter = self._major_formatter\n\n class Scale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return Scale(name, (forward, inverse))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 248, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = self._get_transform()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 250, "name": "_get_scale", "kind": "ref", "category": "function", "info": " mpl_scale = self._get_scale(data.name, forward, inverse)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 253, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 265, "name": "forward", "kind": "ref", "category": "function", "info": " a = forward(vmin)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 266, "name": "forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 266, "name": "forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 268, "name": "normalize", "kind": "def", "category": "function", "info": " def normalize(x):\n return (x - a) / b\n\n else:\n normalize = vmin = vmax = None\n\n forward_pipe = [\n axis.convert_units,\n forward,\n normalize,\n prop.get_mapping(new, data)\n ]\n\n def spacer(x):\n return np.min(np.diff(np.sort(x.dropna().unique())))\n\n # TODO make legend optional on per-plot basis with ScaleSpec parameter?\n if prop.legend:\n axis.set_view_interval(vmin, vmax)\n locs = axis.major.locator()\n locs = locs[(vmin <= locs) & (locs <= vmax)]\n labels = axis.major.formatter.format_ticks(locs)\n legend = list(locs), list(labels)\n\n else:\n legend = None\n\n scale_type = self.__class__.__name__.lower()\n return Scale(forward_pipe, spacer, legend, scale_type, mpl_scale)\n\n def _get_transform(self):\n\n arg = self.transform\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n # TODO useful error message\n raise ValueError()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 278, "name": "get_mapping", "kind": "ref", "category": "function", "info": " prop.get_mapping(new, data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 281, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n return 1\n\n if prop.legend:\n legend = units_seed, list(stringify(units_seed))\n else:\n legend = None\n\n scale_type = self.__class__.__name__.lower()\n scale = Scale(forward_pipe, spacer, legend, scale_type, mpl_scale)\n return scale\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 287, "name": "locator", "kind": "ref", "category": "function", "info": " locs = axis.major.locator()\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 296, "name": "Scale", "kind": "ref", "category": "function", "info": " return Scale(forward_pipe, spacer, legend, scale_type, mpl_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 298, "name": "_get_transform", "kind": "def", "category": "function", "info": " def _get_transform(self):\n\n arg = self.transform\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n # TODO useful error message\n raise ValueError()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 302, "name": "get_param", "kind": "def", "category": "function", "info": " def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n # TODO useful error message\n raise ValueError()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 308, "name": "_make_identity_transforms", "kind": "ref", "category": "function", "info": " return _make_identity_transforms()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 313, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 315, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"logit\", 10)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 316, "name": "_make_logit_transforms", "kind": "ref", "category": "function", "info": " return _make_logit_transforms(base)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": 
"seaborn/_core/scales.py", "line": 318, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"log\", 10)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 319, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms(base)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 321, "name": "get_param", "kind": "ref", "category": "function", "info": " c = get_param(\"symlog\", 1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 322, "name": "_make_symlog_transforms", "kind": "ref", "category": "function", "info": " return _make_symlog_transforms(c)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 324, "name": "get_param", "kind": "ref", "category": "function", "info": " exp = get_param(\"pow\", 2)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 325, "name": "_make_power_transforms", "kind": "ref", "category": "function", "info": " return _make_power_transforms(exp)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 327, "name": "_make_sqrt_transforms", "kind": "ref", "category": "function", "info": " return _make_sqrt_transforms()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 334, "name": "Continuous", "kind": "def", "category": "class", "info": "tick"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 346, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = None,\n ) -> Continuous: # TODO type return value as Self\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator: matplotlib Locator\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n Returns self with new tick configuration.\n\n \"\"\"\n\n # TODO what about symlog?\n if isinstance(self.transform, str):\n m = re.match(r\"log(\\d*)\", self.transform)\n log_transform = m is not None\n log_base = m[1] or 10 if m is not None else None\n forward, inverse = self._get_transform()\n else:\n log_transform = False\n log_base = forward = inverse = None\n\n if 
locator is not None:\n # TODO accept tuple for major, minor?\n if not isinstance(locator, Locator):\n err = (\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n raise TypeError(err)\n major_locator = locator\n\n # TODO raise if locator is passed with any other parameters\n\n elif upto is not None:\n if log_transform:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n if log_transform:\n msg = \"`count` requires `between` with log transform.\"\n raise RuntimeError(msg)\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_transform:\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if log_transform:\n msg = \"`every` not supported with log transform.\"\n raise RuntimeError(msg)\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n major_locator = LogLocator(log_base) if log_transform else AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_transform else None\n else:\n if log_transform:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n self._major_locator = major_locator\n self._minor_locator = minor_locator\n\n return self\n\n # TODO need to fill this out\n # def format(self, ...):\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 384, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"log(\\d*)\", self.transform)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 387, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = self._get_transform()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 419, "name": "forward", "kind": "ref", "category": "function", "info": " lo, hi = forward(between)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 420, "name": "inverse", "kind": "ref", "category": "function", "info": " ticks = inverse(np.linspace(lo, hi, num=count))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 461, "name": "Temporal", "kind": "def", "category": "class", "info": "tick\tformat"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 477, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = 
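
The branches of `Continuous.tick` above reduce a small parameter vocabulary (`at`, `upto`, `count`, `every`, `between`) to a matplotlib `Locator`. A sketch of just the linear (non-log) branches, with an illustrative function name:

import numpy as np
from matplotlib.ticker import (
    AutoLocator, FixedLocator, LinearLocator, MaxNLocator, MultipleLocator,
)

def pick_major_locator(at=None, upto=None, count=None, every=None, between=None):
    if upto is not None:
        return MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])
    if count is not None:
        if between is None:
            return LinearLocator(count)  # rarely useful without limits
        return FixedLocator(np.linspace(*between, num=count))
    if every is not None:
        if between is None:
            return MultipleLocator(every)
        lo, hi = between
        return FixedLocator(np.arange(lo, hi + every, every))
    if at is not None:
        return FixedLocator(at)
    return AutoLocator()

print(pick_major_locator(count=3, between=(0, 1)))  # FixedLocator at 0, 0.5, 1
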
None,\n ) -> Continuous: # TODO type return value as Self\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator: matplotlib Locator\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n Returns self with new tick configuration.\n\n \"\"\"\n\n # TODO what about symlog?\n if isinstance(self.transform, str):\n m = re.match(r\"log(\\d*)\", self.transform)\n log_transform = m is not None\n log_base = m[1] or 10 if m is not None else None\n forward, inverse = self._get_transform()\n else:\n log_transform = False\n log_base = forward = inverse = None\n\n if locator is not None:\n # TODO accept tuple for major, minor?\n if not isinstance(locator, Locator):\n err = (\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n raise TypeError(err)\n major_locator = locator\n\n # TODO raise if locator is passed with any other parameters\n\n elif upto is not None:\n if log_transform:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n if log_transform:\n msg = \"`count` requires `between` with log transform.\"\n raise RuntimeError(msg)\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_transform:\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if log_transform:\n msg = \"`every` not supported with log transform.\"\n raise RuntimeError(msg)\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n major_locator = LogLocator(log_base) if log_transform else AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_transform else None\n else:\n if log_transform:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n self._major_locator = major_locator\n self._minor_locator = minor_locator\n\n return self\n\n # TODO need to fill this out\n # def format(self, ...):\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 525, "name": "Calendric", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 530, "name": "Binned", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": 
"seaborn/_core/scales.py", "line": 544, "name": "PseudoAxis", "kind": "def", "category": "class", "info": "__init__\tset_view_interval\tget_view_interval\tset_data_interval\tget_data_interval\tget_tick_space\tset_major_locator\tset_major_formatter\tset_minor_locator\tset_minor_formatter\tset_units\tupdate_units\tconvert_units\tget_scale\tget_majorticklocs"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 561, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.major = mpl.axis.Ticker()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 562, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.minor = mpl.axis.Ticker()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 618, "name": "get_converter", "kind": "ref", "category": "function", "info": " self.converter = mpl.units.registry.get_converter(x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 620, "name": "default_units", "kind": "ref", "category": "function", "info": " self.converter.default_units(x, self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 622, "name": "axisinfo", "kind": "ref", "category": "function", "info": " info = self.converter.axisinfo(self.units, self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 641, "name": "convert", "kind": "ref", "category": "function", "info": " return self.converter.convert(x, self.units, self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 651, "name": "locator", "kind": "ref", "category": "function", "info": " return self.major.locator()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 657, "name": "_make_identity_transforms", "kind": "def", "category": "function", "info": "def _make_identity_transforms() -> Transforms:\n\n def identity(x):\n return x\n\n return identity, identity\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 665, "name": "_make_logit_transforms", "kind": "def", "category": "function", "info": "def _make_logit_transforms(base: float = None) -> Transforms:\n\n log, exp = _make_log_transforms(base)\n\n def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 667, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 669, "name": "logit", "kind": 
"def", "category": "function", "info": " def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 673, "name": "expit", "kind": "def", "category": "function", "info": " def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 680, "name": "_make_log_transforms", "kind": "def", "category": "function", "info": "def _make_log_transforms(base: float | None = None) -> Transforms:\n\n if base is None:\n fs = np.log, np.exp\n elif base == 2:\n fs = np.log2, partial(np.power, 2)\n elif base == 10:\n fs = np.log10, partial(np.power, 10)\n else:\n def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 689, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 704, "name": "_make_symlog_transforms", "kind": "def", "category": "function", "info": "def _make_symlog_transforms(c: float = 1, base: float = 10) -> Transforms:\n\n # From https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001\n\n # Note: currently not using base because we only get\n # one parameter from the string, and are using c (this is consistent with d3)\n\n log, exp = _make_log_transforms(base)\n\n def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 711, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 713, "name": "symlog", "kind": "def", "category": "function", "info": " def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 717, "name": "symexp", "kind": "def", "category": "function", "info": " def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 724, "name": "_make_sqrt_transforms", "kind": "def", "category": "function", "info": "def _make_sqrt_transforms() -> Transforms:\n\n def sqrt(x):\n return np.sign(x) * np.sqrt(np.abs(x))\n\n def square(x):\n return np.sign(x) * np.square(x)\n\n return sqrt, square\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 735, "name": "_make_power_transforms", "kind": "def", "category": "function", "info": "def _make_power_transforms(exp: float) -> Transforms:\n\n def forward(x):\n return np.sign(x) * np.power(np.abs(x), exp)\n\n def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 737, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 740, "name": "inverse", "kind": "def", "category": "function", "info": " def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 15, "name": "Subplots", "kind": "def", "category": "class", "info": "__init__\t_check_dimension_uniqueness\t_determine_grid_dimensions\t_handle_wrapping\t_determine_axis_sharing\tinit_figure\t__iter__\t__len__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 41, "name": "_check_dimension_uniqueness", "kind": "ref", "category": "function", "info": " self._check_dimension_uniqueness(facet_spec, pair_spec)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 42, "name": "_determine_grid_dimensions", "kind": "ref", "category": "function", "info": " self._determine_grid_dimensions(facet_spec, pair_spec)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 43, "name": "_handle_wrapping", "kind": "ref", "category": "function", "info": " self._handle_wrapping(facet_spec, pair_spec)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 44, "name": "_determine_axis_sharing", "kind": "ref", "category": "function", "info": " 
self._determine_axis_sharing(pair_spec)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 46, "name": "_check_dimension_uniqueness", "kind": "def", "category": "function", "info": " def _check_dimension_uniqueness(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Reject specs that pair and facet on (or wrap to) same figure dimension.\"\"\"\n err = None\n\n facet_vars = facet_spec.get(\"variables\", {})\n\n if facet_spec.get(\"wrap\") and {\"col\", \"row\"} <= set(facet_vars):\n err = \"Cannot wrap facets when specifying both `col` and `row`.\"\n elif (\n pair_spec.get(\"wrap\")\n and pair_spec.get(\"cross\", True)\n and len(pair_spec.get(\"structure\", {}).get(\"x\", [])) > 1\n and len(pair_spec.get(\"structure\", {}).get(\"y\", [])) > 1\n ):\n err = \"Cannot wrap subplots when pairing on both `x` and `y`.\"\n\n collisions = {\"x\": [\"columns\", \"rows\"], \"y\": [\"rows\", \"columns\"]}\n for pair_axis, (multi_dim, wrap_dim) in collisions.items():\n if pair_axis not in pair_spec.get(\"structure\", {}):\n continue\n elif multi_dim[:3] in facet_vars:\n err = f\"Cannot facet the {multi_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and facet_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and pair_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {multi_dim} while faceting the {wrap_dim}.\"\n\n if err is not None:\n raise RuntimeError(err) # TODO what err class? Define PlotSpecError?\n\n def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in 
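
`Subplots._handle_wrapping` above folds a one-dimensional run of subplots into a grid: the wrapped dimension is capped at `wrap` and the excess flows into the other dimension. The arithmetic, isolated (illustrative helper name):

import numpy as np

def wrapped_grid(n_subplots, wrap):
    # Ceil-divide the subplots into `flow` rows/cols of at most `wrap` each.
    flow = int(np.ceil(n_subplots / wrap))
    return min(wrap, n_subplots), flow

print(wrapped_grid(5, 3))  # (3, 2): a 3-wide grid, 2 deep, one Axes removed
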
pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap in [None, 1] and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": 
i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 78, "name": "_determine_grid_dimensions", "kind": "def", "category": "function", "info": " def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap in [None, 1] and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a 
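
`init_figure` above tags each subplot with edge flags (`left`/`right`/`top`/`bottom`) so later code knows where tick labels belong. In the unwrapped case this is simple modular arithmetic over the grid position; a sketch:

def border_flags(i, j, nrows, ncols):
    # i, j are the row/column indices of the Axes in the grid.
    return {
        "left": j % ncols == 0,
        "right": (j + 1) % ncols == 0,
        "top": i == 0,
        "bottom": i == nrows - 1,
    }

print(border_flags(0, 1, 2, 3))
# {'left': False, 'right': False, 'top': True, 'bottom': False}
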
{mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 102, "name": "_handle_wrapping", "kind": "def", "category": "function", "info": " def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or 
pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap in [None, 1] and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % 
ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 121, "name": "_determine_axis_sharing", "kind": "def", "category": "function", "info": " def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap in [None, 1] and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = 
plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 142, "name": "init_figure", "kind": "def", "category": "function", "info": " def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n 
\"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_decorators.py", "rel_fname": "seaborn/_decorators.py", "line": 3, "name": "share_init_params_with_map", "kind": "def", "category": "function", "info": "def share_init_params_with_map(cls):\n \"\"\"Make cls.map a classmethod with same signature as cls.__init__.\"\"\"\n map_sig = signature(cls.map)\n init_sig = signature(cls.__init__)\n\n new = [v for k, v in init_sig.parameters.items() if k != \"self\"]\n new.insert(0, map_sig.parameters[\"cls\"])\n cls.map.__signature__ = map_sig.replace(parameters=new)\n cls.map.__doc__ = cls.__init__.__doc__\n\n cls.map = classmethod(cls.map)\n\n return cls\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": 
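The `share_init_params_with_map` record above rewrites a class's `map` to mirror `__init__`: it copies the init signature (minus `self`), prepends the `cls` parameter, copies the docstring, and wraps `map` as a classmethod. A hypothetical usage sketch (the `Palette` class is invented; assumes this seaborn checkout is importable):

```python
from seaborn._decorators import share_init_params_with_map

@share_init_params_with_map
class Palette:
    def __init__(self, colors=None, order=None):
        """Initialize the palette with colors and their order."""
        self.colors, self.order = colors, order

    def map(cls, plotter, colors=None, order=None):
        # Written as a plain function; the decorator makes it a classmethod
        return cls(colors, order)

print(Palette.map.__doc__)  # docstring copied from __init__
```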
"seaborn/_docstrings.py", "line": 5, "name": "DocstringComponents", "kind": "def", "category": "class", "info": "__init__\t__getattr__\tfrom_nested_components\tfrom_function_params"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 18, "name": "group", "kind": "ref", "category": "function", "info": " entries[key] = m.group(1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 24, "name": "__getattr__", "kind": "def", "category": "function", "info": " def __getattr__(self, attr):\n \"\"\"Provide dot access to entries for clean raw docstrings.\"\"\"\n if attr in self.entries:\n return self.entries[attr]\n else:\n try:\n return self.__getattribute__(attr)\n except AttributeError as err:\n # If Python is run with -OO, it will strip docstrings and our lookup\n # from self.entries will fail. We check for __debug__, which is actually\n # set to False by -O (it is True for normal execution).\n # But we only want to see an error when building the docs;\n # not something users should see, so this slight inconsistency is fine.\n if __debug__:\n raise err\n else:\n pass\n\n @classmethod\n def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 43, "name": "from_nested_components", "kind": "def", "category": "function", "info": " def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 45, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(kwargs, strip_whitespace=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 48, "name": "from_function_params", "kind": "def", "category": "function", "info": " def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 50, "name": 
"NumpyDocString", "kind": "ref", "category": "function", "info": " params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 58, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(comp_dict)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 194, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " params=DocstringComponents(_core_params),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 195, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " returns=DocstringComponents(_core_returns),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 196, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " seealso=DocstringComponents(_seealso_blurbs),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 19, "name": "AreaBase", "kind": "def", "category": "class", "info": "_plot\t_standardize_coordinate_parameters\t_get_verts\t_legend_artist"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 21, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n kws = {}\n\n for keys, data, ax in split_gen():\n\n kws.setdefault(ax, defaultdict(list))\n\n data = self._standardize_coordinate_parameters(data, orient)\n resolved = resolve_properties(self, keys, scales)\n verts = self._get_verts(data, orient)\n\n ax.update_datalim(verts)\n kws[ax][\"verts\"].append(verts)\n\n # TODO fill= is not working here properly\n # We could hack a fix, but would be better to handle fill in resolve_color\n\n kws[ax][\"facecolors\"].append(resolve_color(self, keys, \"\", scales))\n kws[ax][\"edgecolors\"].append(resolve_color(self, keys, \"edge\", scales))\n\n kws[ax][\"linewidth\"].append(resolved[\"edgewidth\"])\n kws[ax][\"linestyle\"].append(resolved[\"edgestyle\"])\n\n for ax, ax_kws in kws.items():\n ax.add_collection(mpl.collections.PolyCollection(**ax_kws))\n\n def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n return mpl.patches.Patch(\n facecolor=resolve_color(self, keys, \"\", scales),\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 25, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in 
split_gen():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 29, "name": "_standardize_coordinate_parameters", "kind": "ref", "category": "function", "info": " data = self._standardize_coordinate_parameters(data, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 30, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 31, "name": "_get_verts", "kind": "ref", "category": "function", "info": " verts = self._get_verts(data, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 33, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(verts)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 39, "name": "resolve_color", "kind": "ref", "category": "function", "info": " kws[ax][\"facecolors\"].append(resolve_color(self, keys, \"\", scales))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 40, "name": "resolve_color", "kind": "ref", "category": "function", "info": " kws[ax][\"edgecolors\"].append(resolve_color(self, keys, \"edge\", scales))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 46, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(mpl.collections.PolyCollection(**ax_kws))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 46, "name": "PolyCollection", "kind": "ref", "category": "function", "info": " ax.add_collection(mpl.collections.PolyCollection(**ax_kws))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 48, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n return mpl.patches.Patch(\n facecolor=resolve_color(self, keys, \"\", scales),\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 51, "name": "_get_verts", "kind": "def", "category": "function", "info": " def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", 
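The `DocstringComponents` records above implement dotted access to reusable docstring fragments: `from_nested_components` groups sub-components, and `__getattr__` resolves `params.core.data`-style lookups inside `str.format`. A usage sketch (the `data` entry text here is invented; assumes the package is importable):

```python
from seaborn._docstrings import DocstringComponents

_core = DocstringComponents({"data": "data : DataFrame\n    Input data."})
_param_docs = DocstringComponents.from_nested_components(core=_core)

def plotfunc(data=None):
    """Plot something.

    Parameters
    ----------
    {params.core.data}
    """

plotfunc.__doc__ = plotfunc.__doc__.format(params=_param_docs)
print("Input data." in plotfunc.__doc__)  # True
```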
\"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n return mpl.patches.Patch(\n facecolor=resolve_color(self, keys, \"\", scales),\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 54, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 56, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}min\"]].to_numpy(),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 57, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 63, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n return mpl.patches.Patch(\n facecolor=resolve_color(self, keys, \"\", scales),\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 66, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 68, "name": "Patch", "kind": "ref", "category": "function", "info": " return mpl.patches.Patch(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 69, "name": "resolve_color", "kind": "ref", "category": "function", "info": " facecolor=resolve_color(self, keys, \"\", scales),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 70, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edgecolor=resolve_color(self, keys, \"edge\", scales),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 78, "name": "Area", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 82, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: 
MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 83, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 84, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 85, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 86, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 87, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 88, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 91, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 93, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n return mpl.patches.Patch(\n facecolor=resolve_color(self, keys, \"\", scales),\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 95, "name": "rename", "kind": "ref", "category": "function", "info": " return data.rename(columns={\"baseline\": f\"{dv}min\", dv: f\"{dv}max\"})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 99, "name": "Ribbon", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": 
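The `_get_verts` records above close a fill polygon by walking the min edge forward and the max edge backward; flipping the column order handles the `orient="y"` case. A minimal numpy/pandas sketch with made-up data:

```python
import numpy as np
import pandas as pd

data = pd.DataFrame({"x": [0, 1, 2], "ymin": [0, 1, 0], "ymax": [2, 3, 2]})
verts = np.concatenate([
    data[["x", "ymin"]].to_numpy(),        # forward along the lower edge
    data[["x", "ymax"]].to_numpy()[::-1],  # backward along the upper edge
])
print(verts.shape)  # (6, 2): a closed ring suitable for PolyCollection
```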
"seaborn/_marks/area.py", "line": 103, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 104, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 105, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 106, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 107, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 108, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(0, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 109, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableFloat = Mappable(\"-\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 111, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n return mpl.patches.Patch(\n facecolor=resolve_color(self, keys, \"\", scales),\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 24, "name": "Bar", "kind": "def", "category": "class", "info": "_resolve_properties\t_plot\t_legend_artist"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 28, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 29, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, )\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 30, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 31, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 32, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 33, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 34, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", )\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(.8, grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 38, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this mappable?\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 40, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n\n resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n def coords_to_geometry(x, y, w, b):\n # TODO possible too slow with lots of bars (e.g. 
dense hist)\n # Why not just use BarCollection?\n if orient == \"x\":\n w, h = w, y - b\n xy = x - w / 2, b\n else:\n w, h = x - b, w\n xy = b, y - h / 2\n return xy, w, h\n\n for _, data, ax in split_gen():\n\n xys = data[[\"x\", \"y\"]].to_numpy()\n data = self._resolve_properties(data, scales)\n\n bars = []\n for i, (x, y) in enumerate(xys):\n\n baseline = data[\"baseline\"][i]\n width = data[\"width\"][i]\n xy, w, h = coords_to_geometry(x, y, width, baseline)\n\n bar = mpl.patches.Rectangle(\n xy=xy,\n width=w,\n height=h,\n facecolor=data[\"facecolor\"][i],\n edgecolor=data[\"edgecolor\"][i],\n linewidth=data[\"edgewidth\"][i],\n linestyle=data[\"edgestyle\"][i],\n )\n ax.add_patch(bar)\n bars.append(bar)\n\n # TODO add container object to ax, line ax.bar does\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 42, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 44, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 45, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 56, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n def coords_to_geometry(x, y, w, b):\n # TODO possible too slow with lots of bars (e.g. 
dense hist)\n # Why not just use BarCollection?\n if orient == \"x\":\n w, h = w, y - b\n xy = x - w / 2, b\n else:\n w, h = x - b, w\n xy = b, y - h / 2\n return xy, w, h\n\n for _, data, ax in split_gen():\n\n xys = data[[\"x\", \"y\"]].to_numpy()\n data = self._resolve_properties(data, scales)\n\n bars = []\n for i, (x, y) in enumerate(xys):\n\n baseline = data[\"baseline\"][i]\n width = data[\"width\"][i]\n xy, w, h = coords_to_geometry(x, y, width, baseline)\n\n bar = mpl.patches.Rectangle(\n xy=xy,\n width=w,\n height=h,\n facecolor=data[\"facecolor\"][i],\n edgecolor=data[\"edgecolor\"][i],\n linewidth=data[\"edgewidth\"][i],\n linestyle=data[\"edgestyle\"][i],\n )\n ax.add_patch(bar)\n bars.append(bar)\n\n # TODO add container object to ax, line ax.bar does\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 58, "name": "coords_to_geometry", "kind": "def", "category": "function", "info": " def coords_to_geometry(x, y, w, b):\n # TODO possible too slow with lots of bars (e.g. dense hist)\n # Why not just use BarCollection?\n if orient == \"x\":\n w, h = w, y - b\n xy = x - w / 2, b\n else:\n w, h = x - b, w\n xy = b, y - h / 2\n return xy, w, h\n\n for _, data, ax in split_gen():\n\n xys = data[[\"x\", \"y\"]].to_numpy()\n data = self._resolve_properties(data, scales)\n\n bars = []\n for i, (x, y) in enumerate(xys):\n\n baseline = data[\"baseline\"][i]\n width = data[\"width\"][i]\n xy, w, h = coords_to_geometry(x, y, width, baseline)\n\n bar = mpl.patches.Rectangle(\n xy=xy,\n width=w,\n height=h,\n facecolor=data[\"facecolor\"][i],\n edgecolor=data[\"edgecolor\"][i],\n linewidth=data[\"edgewidth\"][i],\n linestyle=data[\"edgestyle\"][i],\n )\n ax.add_patch(bar)\n bars.append(bar)\n\n # TODO add container object to ax, line ax.bar does\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 69, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 71, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[\"x\", \"y\"]].to_numpy()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 72, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " data = self._resolve_properties(data, scales)\n"}, {"fname": 
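The `coords_to_geometry` helper recorded above converts a (position, value) pair plus width and baseline into a `Rectangle` anchor and size for either orientation. A standalone restatement (illustrative only):

```python
def coords_to_geometry(x, y, w, b, orient="x"):
    if orient == "x":   # vertical bar: grows from baseline b up to y
        width, height = w, y - b
        xy = x - w / 2, b
    else:               # horizontal bar: grows from baseline b out to x
        width, height = x - b, w
        xy = b, y - w / 2
    return xy, width, height

print(coords_to_geometry(2, 5, 0.8, 0))  # ((1.6, 0), 0.8, 5)
```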
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 79, "name": "coords_to_geometry", "kind": "ref", "category": "function", "info": " xy, w, h = coords_to_geometry(x, y, width, baseline)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 81, "name": "Rectangle", "kind": "ref", "category": "function", "info": " bar = mpl.patches.Rectangle(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 90, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(bar)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 95, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 100, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " key = self._resolve_properties(key, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/bars.py", "rel_fname": "seaborn/_marks/bars.py", "line": 101, "name": "Patch", "kind": "ref", "category": "function", "info": " artist = mpl.patches.Patch(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 18, "name": "Mappable", "kind": "def", "category": "class", "info": "__init__\t__repr__\tdepend\tgrouping\tdefault"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 64, "name": "depend", "kind": "def", "category": "function", "info": " def depend(self) -> Any:\n \"\"\"Return the name of the feature to source a default value from.\"\"\"\n return self._depend\n\n @property\n def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 69, "name": "grouping", "kind": "def", "category": "function", "info": " def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 73, "name": "default", "kind": "def", "category": "function", "info": " def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access 
the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 90, "name": "Mark", "kind": "def", "category": "class", "info": "_mappable_props\t_grouping_props\t_resolve\t_infer_orient\t_plot\t_legend_artist"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 95, "name": "_mappable_props", "kind": "def", "category": "function", "info": " def _mappable_props(self):\n return {\n f.name: getattr(self, f.name) for f in fields(self)\n if isinstance(f.default, Mappable)\n }\n\n @property\n def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would extender every need to call directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n feature = scales[name](data[name])\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. 
Nominal > Discrete > Continuous\n\n x_type = None if \"x\" not in scales else scales[\"x\"].scale_type\n y_type = None if \"y\" not in scales else scales[\"y\"].scale_type\n\n if x_type is None or x_type == \"computed\":\n return \"y\"\n\n elif y_type is None or y_type == \"computed\":\n return \"x\"\n\n elif x_type != \"nominal\" and y_type == \"nominal\":\n return \"y\"\n\n elif x_type != \"continuous\" and y_type == \"continuous\":\n\n # TODO should we try to orient based on number of unique values?\n\n return \"x\"\n\n elif x_type == \"continuous\" and y_type != \"continuous\":\n return \"y\"\n\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 102, "name": "_grouping_props", "kind": "def", "category": "function", "info": " def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would extender every need to call directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n feature = scales[name](data[name])\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. 
set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x_type = None if \"x\" not in scales else scales[\"x\"].scale_type\n y_type = None if \"y\" not in scales else scales[\"y\"].scale_type\n\n if x_type is None or x_type == \"computed\":\n return \"y\"\n\n elif y_type is None or y_type == \"computed\":\n return \"x\"\n\n elif x_type != \"nominal\" and y_type == \"nominal\":\n return \"y\"\n\n elif x_type != \"continuous\" and y_type == \"continuous\":\n\n # TODO should we try to orient based on number of unique values?\n\n return \"x\"\n\n elif x_type == \"continuous\" and y_type != \"continuous\":\n return \"y\"\n\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 111, "name": "_resolve", "kind": "def", "category": "function", "info": " def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? 
Just don't add a scale?\n feature = data[name]\n else:\n feature = scales[name](data[name])\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x_type = None if \"x\" not in scales else scales[\"x\"].scale_type\n y_type = None if \"y\" not in scales else scales[\"y\"].scale_type\n\n if x_type is None or x_type == \"computed\":\n return \"y\"\n\n elif y_type is None or y_type == \"computed\":\n return \"x\"\n\n elif x_type != \"nominal\" and y_type == \"nominal\":\n return \"y\"\n\n elif x_type != \"continuous\" and y_type == \"continuous\":\n\n # TODO should we try to orient based on number of unique values?\n\n return \"x\"\n\n elif x_type == \"continuous\" and y_type != \"continuous\":\n return \"y\"\n\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 137, "name": "Property", "kind": "ref", "category": "function", "info": " prop = PROPERTIES.get(name, Property(name))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 149, "name": "standardize", "kind": "ref", "category": "function", "info": " feature = prop.standardize(feature)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 169, "name": "_resolve", "kind": "ref", "category": "function", "info": " return self._resolve(data, feature.depend, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 171, "name": "standardize", "kind": "ref", "category": "function", "info": " default = prop.standardize(feature.default)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 178, "name": "_infer_orient", "kind": "def", "category": "function", "info": " def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. 
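`Mark._resolve` above applies a fixed fallback order: a directly specified value wins, then a (possibly scaled) data column, then a `depend=` redirect to another property, then the default or rcParam. An illustrative toy version (plain dicts stand in for `Mappable`; this is not the seaborn implementation):

```python
def resolve(mark_props, data, name, scales=None):
    feature = mark_props[name]
    if not isinstance(feature, dict):   # directly specified scalar
        return feature
    if name in data:                    # mapped from the data
        scale = (scales or {}).get(name)
        return scale(data[name]) if scale else data[name]
    if feature.get("depend"):           # borrow another property's value
        return resolve(mark_props, data, feature["depend"], scales)
    return feature["default"]           # fall back to the default

props = {"edgecolor": {"depend": "color"}, "color": {"default": "C0"}}
print(resolve(props, data={}, name="edgecolor"))  # "C0"
```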
Nominal > Discrete > Continuous\n\n x_type = None if \"x\" not in scales else scales[\"x\"].scale_type\n y_type = None if \"y\" not in scales else scales[\"y\"].scale_type\n\n if x_type is None or x_type == \"computed\":\n return \"y\"\n\n elif y_type is None or y_type == \"computed\":\n return \"x\"\n\n elif x_type != \"nominal\" and y_type == \"nominal\":\n return \"y\"\n\n elif x_type != \"continuous\" and y_type == \"continuous\":\n\n # TODO should we try to orient based on number of unique values?\n\n return \"x\"\n\n elif x_type == \"continuous\" and y_type != \"continuous\":\n return \"y\"\n\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 210, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 219, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 226, "name": "resolve_properties", "kind": "def", "category": "function", "info": "def resolve_properties(\n mark: Mark, data: DataFrame, scales: dict[str, Scale]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 231, "name": "_resolve", "kind": "ref", "category": "function", "info": " name: mark._resolve(data, name, scales) for name in mark._mappable_props\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 236, "name": "resolve_color", "kind": "def", "category": "function", "info": "def resolve_color(\n mark: Mark,\n data: DataFrame | dict,\n prefix: str = \"\",\n scales: dict[str, Scale] | None = None,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 262, "name": "_resolve", "kind": "ref", "category": "function", "info": " color = mark._resolve(data, f\"{prefix}color\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 265, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, f\"{prefix}alpha\", scales)\n"}, {"fname": 
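The `_infer_orient` heuristic above prefers to treat a nominal axis as the independent variable and a continuous axis as the dependent one, defaulting to `"x"`. A sketch with plain strings standing in for seaborn `Scale` objects:

```python
def infer_orient(x_type, y_type):
    if x_type is None or x_type == "computed":
        return "y"
    if y_type is None or y_type == "computed":
        return "x"
    if x_type != "nominal" and y_type == "nominal":
        return "y"
    if x_type != "continuous" and y_type == "continuous":
        return "x"
    if x_type == "continuous" and y_type != "continuous":
        return "y"
    return "x"

print(infer_orient("nominal", "continuous"))  # "x": bars over categories
```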
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 267, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, \"alpha\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 269, "name": "visible", "kind": "def", "category": "function", "info": " def visible(x, axis=None):\n \"\"\"Detect \"invisible\" colors to set alpha appropriately.\"\"\"\n # TODO First clause only needed to handle non-rgba arrays,\n # which we are trying to handle upstream\n return np.array(x).dtype.kind != \"f\" or np.isfinite(x).all(axis)\n\n # Second check here catches vectors of strings with identity scale\n # It could probably be handled better upstream. This is a tricky problem\n if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):\n if len(color) == 4:\n return mpl.colors.to_rgba(color)\n alpha = alpha if visible(color) else np.nan\n return mpl.colors.to_rgba(color, alpha)\n else:\n if np.ndim(color) == 2 and color.shape[1] == 4:\n return mpl.colors.to_rgba_array(color)\n alpha = np.where(visible(color, axis=1), alpha, np.nan)\n return mpl.colors.to_rgba_array(color, alpha)\n\n # TODO should we be implementing fill here too?\n # (i.e. set fillalpha to 0 when fill=False)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 279, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 280, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = alpha if visible(color) else np.nan\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 281, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color, alpha)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 284, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 285, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = np.where(visible(color, axis=1), alpha, np.nan)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 286, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color, alpha)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 292, "name": "MultiMark", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 24, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", 
"rel_fname": "seaborn/_marks/lines.py", "line": 25, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 26, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 27, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 28, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"lines.marker\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 29, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(rc=\"lines.markersize\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 30, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 31, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 32, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"lines.markeredgewidth\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 36, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n if self._sort:\n data = data.sort_values(orient)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, 
keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 38, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 40, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 41, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 42, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 43, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 46, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 46, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 50, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 52, "name": "Line2D", "kind": "ref", "category": "function", "info": " line = mpl.lines.Line2D(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 53, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"x\"].to_numpy(),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 54, "name": "to_numpy", "kind": "ref", "category": "function", "info": " 
data[\"y\"].to_numpy(),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 65, "name": "add_line", "kind": "ref", "category": "function", "info": " ax.add_line(line)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 67, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 70, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 71, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 72, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 73, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 76, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 76, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 79, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 94, "name": "Line", "kind": "def", "category": "class", "info": ""}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 102, "name": "Paths", "kind": "def", "category": "class", "info": "_plot\t_legend_artist"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 106, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 107, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 108, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 109, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 113, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n if self._sort:\n data = data.sort_values(orient)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 117, "name": "split_gen", "kind": "ref", 
"category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 127, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 128, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 131, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 141, "name": "LineCollection", "kind": "ref", "category": "function", "info": " lines = mpl.collections.LineCollection(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 145, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines, autolim=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 149, "name": "update_from_data_xy", "kind": "ref", "category": "function", "info": " ax.dataLim.update_from_data_xy(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 153, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 155, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " key = resolve_properties(self, {v: value for v in variables}, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 157, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/lines.py", "rel_fname": "seaborn/_marks/lines.py", "line": 167, "name": "Lines", "kind": "def", "category": "class", "info": ""}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 26, "name": "Scatter", "kind": "def", "category": "class", "info": "_resolve_paths\t_resolve_properties\t_plot\t_legend_artist"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 31, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"scatter.marker\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 32, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 33, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(3, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 34, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 35, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False) # TODO auto alpha?\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 36, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 38, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillalpha: MappableFloat = Mappable(.2, grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 40, "name": "_resolve_paths", "kind": "def", "category": "function", "info": " def _resolve_paths(self, data):\n\n paths = []\n path_cache = {}\n marker = data[\"marker\"]\n\n def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n\n if isinstance(data, dict): # TODO need a better way to check\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n 
resolved[\"linewidth\"] = resolved[\"stroke\"]\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n\n # Because only Dot, and not Scatter, has an edgestyle\n resolved.setdefault(\"edgestyle\", (0, None))\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for keys, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 46, "name": "get_transformed_path", "kind": "def", "category": "function", "info": " def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n\n if isinstance(data, dict): # TODO need a better way to check\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"linewidth\"] = resolved[\"stroke\"]\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n\n # Because only Dot, and not Scatter, has an edgestyle\n resolved.setdefault(\"edgestyle\", (0, None))\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but 
nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for keys, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 47, "name": "get_path", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 47, "name": "transformed", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 50, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " return get_transformed_path(marker)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 54, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " path_cache[m] = get_transformed_path(m)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 58, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n\n if isinstance(data, dict): # TODO need a better way to check\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"linewidth\"] = resolved[\"stroke\"]\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n\n # Because only Dot, and not Scatter, has an edgestyle\n resolved.setdefault(\"edgestyle\", (0, None))\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat 
with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for keys, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 60, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 61, "name": "_resolve_paths", "kind": "ref", "category": "function", "info": " resolved[\"path\"] = self._resolve_paths(resolved)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 64, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = resolved[\"marker\"].is_filled()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 66, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 72, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 73, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 87, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for keys, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n 
sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 93, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 96, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " data = self._resolve_properties(data, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 98, "name": "PathCollection", "kind": "ref", "category": "function", "info": " points = mpl.collections.PathCollection(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 107, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 109, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(points)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 111, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 116, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " res = self._resolve_properties(key, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 118, "name": "PathCollection", "kind": "ref", "category": "function", "info": " return mpl.collections.PathCollection(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 125, "name": "IdentityTransform", "kind": "ref", 
"category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 131, "name": "Dot", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 135, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(\"o\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 136, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 137, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 138, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 139, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 140, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(depend=\"alpha\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 141, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(6, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 142, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(.5, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 143, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 145, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n\n if isinstance(data, dict): # TODO need a better way to check\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"linewidth\"] = resolved[\"stroke\"]\n resolved[\"fill\"] = resolved[\"fill\"] 
* filled_marker\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n\n # Because only Dot, and not Scatter, has an edgestyle\n resolved.setdefault(\"edgestyle\", (0, None))\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for keys, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 147, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 156, "name": "resolve_color", "kind": "ref", "category": "function", "info": " main_color = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_marks/scatter.py", "rel_fname": "seaborn/_marks/scatter.py", "line": 157, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edge_color = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 28, "name": "SemanticMapping", "kind": "def", "category": "class", "info": "__init__\tmap\t_lookup_single\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 54, "name": "cls", "kind": "ref", "category": "function", "info": " setattr(plotter, method_name, cls(plotter, *args, **kwargs))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 57, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, 
**kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 64, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return [self._lookup_single(k, *args, **kwargs) for k in key]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 66, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return self._lookup_single(key, *args, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 70, "name": "HueMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\tinfer_map_type\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 101, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 114, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, cmap = self.numeric_mapping(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 123, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 132, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 145, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 154, "name": "norm", "kind": "ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 161, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 163, "name": "cmap", "kind": "ref", "category": "function", "info": " value = self.cmap(normed)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 166, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, palette, norm, input_format, var_type):\n \"\"\"Determine how to implement the mapping.\"\"\"\n if palette in QUAL_PALETTES:\n map_type = \"categorical\"\n elif norm is not None:\n map_type = \"numeric\"\n elif isinstance(palette, (dict, list)):\n map_type = \"categorical\"\n elif input_format == \"wide\":\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n if len(palette) != n_colors:\n err = \"The palette list has the wrong number of colors.\"\n raise ValueError(err)\n colors = palette\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 181, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - 
set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n if len(palette) != n_colors:\n err = \"The palette list has the wrong number of colors.\"\n raise ValueError(err)\n colors = palette\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 185, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 202, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n_colors <= len(get_color_cycle()):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 203, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(None, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 205, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 212, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 218, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def 
numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 226, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 232, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 243, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(palette, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 247, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 249, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 254, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 255, "name": "norm", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 255, "name": "dropna", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 257, "name": "cmap", "kind": "ref", 
"category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 257, "name": "norm", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 263, "name": "SizeMapping", "kind": "def", "category": "class", "info": "__init__\tinfer_map_type\t_lookup_single\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 284, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 292, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, size_range = self.numeric_mapping(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 300, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 310, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 324, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, norm, sizes, var_type):\n\n if norm is not None:\n map_type = \"numeric\"\n elif isinstance(sizes, (dict, list)):\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def _lookup_single(self, key):\n\n try:\n value = self.lookup_table[key]\n except KeyError:\n normed = self.norm(key)\n if np.ma.is_masked(normed):\n normed = np.nan\n value = self.size_range[0] + normed * np.ptp(self.size_range)\n return value\n\n def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n if len(sizes) != len(levels):\n err = \"The `sizes` list has the wrong number of values.\"\n raise ValueError(err)\n\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter 
is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. This is because \"ordered\" categories\n # are often though to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process it\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. 
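The categorical branch that ends here reduces to a short recipe: evenly spaced sizes between the plotter's min and max, reversed so the first (highest-priority) level gets the largest size. A self-contained sketch, assuming hypothetical level names and a size range standing in for `plotter._default_size_range`:

```python
import numpy as np

levels = ["high", "medium", "low"]   # assumed category order
size_range = (2, 8)                  # assumed default size range
sizes = np.linspace(*size_range, len(levels))[::-1]  # reversed ramp
lookup_table = dict(zip(levels, sizes))
print(lookup_table)  # {'high': 8.0, 'medium': 5.0, 'low': 2.0}
```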
It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 335, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 340, "name": "norm", "kind": "ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 341, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 346, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n if len(sizes) != len(levels):\n err = \"The `sizes` list has the wrong number of values.\"\n raise ValueError(err)\n\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to 
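`numeric_mapping` leans on a documented matplotlib behavior: an unscaled `Normalize` adopts the data limits on its first call, and `clip = True` keeps every later output inside [0, 1]. A standalone demonstration of that sequence, with an assumed artist size range:

```python
import numpy as np
import matplotlib as mpl

levels = np.array([1.0, 4.0, 10.0])
norm = mpl.colors.Normalize()  # vmin/vmax not set yet
norm.clip = True
assert not norm.scaled()
norm(levels)                   # first call autoscales to the data range
assert norm.scaled()
lo, hi = 2, 8                  # assumed artist size range
sizes = lo + norm(levels) * (hi - lo)  # un-normalize into artist units
print(np.round(np.asarray(sizes), 2))  # [2. 4. 8.]
```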
restructure things so that the plotter is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. This is because \"ordered\" categories\n # are often though to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process it\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. 
It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 348, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 407, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process it\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. 
It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 420, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 455, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 458, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 470, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 471, "name": "norm", "kind": "ref", "category": "function", "info": " norm(levels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 474, "name": "norm", "kind": "ref", "category": "function", "info": " sizes_scaled = norm(levels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 488, "name": "StyleMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\t_map_attributes"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 511, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data) == \"datetime\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 515, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 517, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " markers = 
self._map_attributes(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 518, "name": "unique_markers", "kind": "ref", "category": "function", "info": " markers, levels, unique_markers(len(levels)), \"markers\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 520, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " dashes = self._map_attributes(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 521, "name": "unique_dashes", "kind": "ref", "category": "function", "info": " dashes, levels, unique_dashes(len(levels)), \"dashes\",\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 529, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 530, "name": "get_path", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 530, "name": "transformed", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 530, "name": "get_transform", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 531, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_markers.append(m.is_filled())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 554, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key, attr=None):\n \"\"\"Get attribute(s) for a given data point.\"\"\"\n if attr is None:\n value = self.lookup_table[key]\n else:\n value = self.lookup_table[key][attr]\n return value\n\n def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n if len(levels) != len(arg):\n err = f\"The `{attr}` argument has the wrong number of values\"\n raise ValueError(err)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err = f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 562, "name": "_map_attributes", "kind": "def", "category": "function", "info": " def _map_attributes(self, arg, levels, defaults, 
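Several refs in this stretch (`MarkerStyle`, `get_path`, `transformed`, `is_filled`) implement the marker handling in `StyleMapping`. The matplotlib calls involved can be exercised in isolation:

```python
import matplotlib as mpl

m = mpl.markers.MarkerStyle("o")
# Bake the marker's transform into its path, as the indexed code does,
# so the path can later be reused as a vertices spec
path = m.get_path().transformed(m.get_transform())
print(m.is_filled())  # True: "o" is a fillable marker
```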
attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n if len(levels) != len(arg):\n err = f\"The `{attr}` argument has the wrong number of values\"\n raise ValueError(err)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err = f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 589, "name": "VectorPlotter", "kind": "def", "category": "class", "info": "__init__\tget_semantics\thas_xy_data\tvar_levels\tassign_variables\t_assign_variables_wideform\t_assign_variables_longform\titer_data\tcomp_data\t_get_axes\t_attach\t_log_scaled\t_add_axis_labels\tscale_native\tscale_numeric\tscale_datetime\tscale_categorical"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 616, "name": "assign_variables", "kind": "ref", "category": "function", "info": " self.assign_variables(data, variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 628, "name": "get_semantics", "kind": "def", "category": "function", "info": " def get_semantics(cls, kwargs, semantics=None):\n \"\"\"Subset a dictionary` arguments with known semantic variables.\"\"\"\n # TODO this should be get_variables since we have included x and y\n if semantics is None:\n semantics = cls.semantics\n variables = {}\n for key, val in kwargs.items():\n if key in semantics and val is not None:\n variables[key] = val\n return variables\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variables levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
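`get_semantics`, defined in the entry above, is just a keyword filter: keep only keys that are known semantic variables and have non-None values. A sketch with assumed inputs (the `semantics` set here is illustrative, not the class's actual value):

```python
kwargs = {"x": "day", "y": "tip", "hue": None, "legend": "auto"}
semantics = {"x", "y", "hue", "size", "style"}  # assumed class-level set
variables = {k: v for k, v in kwargs.items() if k in semantics and v is not None}
print(variables)  # {'x': 'day', 'y': 'tip'}  (hue dropped as None, legend unknown)
```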
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
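For "flat" wide-form input (a single vector), the code shown here maps the Series index and/or values onto x and y through a `flat_structure` dict. A minimal reproduction, assuming the `{"x": "@index", "y": "@values"}` structure used by the relational plotters:

```python
import pandas as pd

flat_data = pd.Series([4, 2, 5], index=[10, 20, 30], name="signal")
flat_structure = {"x": "@index", "y": "@values"}  # assumed
# attr[1:] strips the "@" so the dict keys double as Series attribute names
plot_data = pd.DataFrame(
    {var: getattr(flat_data, attr[1:]) for var, attr in flat_structure.items()}
)
names = {"@values": flat_data.name, "@index": flat_data.index.name}
variables = {var: names[attr] for var, attr in flat_structure.items()}
print(variables)  # {'x': None, 'y': 'signal'}
```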
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
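The "ragged" branch referenced just above relies on a pandas detail: a dict of Series aligns on index, so unequal lengths pad with NaN instead of raising. A standalone check:

```python
import pandas as pd

data = {"a": [1, 2, 3], "b": [4, 5]}            # "ragged" input lengths
data = {k: pd.Series(v) for k, v in data.items()}
wide = pd.DataFrame(data, copy=True)            # aligns on index, pads with NaN
print(wide)
#    a    b
# 0  1  4.0
# 1  2  5.0
# 2  3  NaN
```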
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
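The lookup order in `_assign_variables_longform` is: data column first, then (string-only) index level, then the value itself as a data vector; bare strings that match nothing raise. A condensed sketch, where `resolve` is a hypothetical helper compressing that branch:

```python
import pandas as pd

data = pd.DataFrame({"total": [1, 2]}, index=pd.Index(["x1", "x2"], name="obs"))
index = data.index.to_frame()

def resolve(val):
    if val in data:                                     # column name
        return data[val]
    if isinstance(val, (str, bytes)) and val in index:  # index level
        return index[val]
    if isinstance(val, (str, bytes)):                   # looks like a name, isn't one
        raise ValueError(f"Could not interpret value `{val}`")
    return val                                          # treat as a data vector

resolve("total"); resolve("obs"); resolve([10, 20])
```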
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
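`iter_data`, whose body starts here, iterates the full cross-product of semantic levels rather than only the observed groups, substituting an empty frame for missing combinations. The core of that loop, with assumed toy data and levels:

```python
import itertools
import pandas as pd

df = pd.DataFrame({"x": range(4), "hue": list("aabb"), "col": [1, 2, 1, 2]})
grouping_vars = ["hue", "col"]
levels = {"hue": ["a", "b"], "col": [1, 2]}   # assumed var_levels

grouped = df.groupby(grouping_vars, sort=False)
for key in itertools.product(*(levels[v] for v in grouping_vars)):
    try:
        subset = grouped.get_group(key)
    except KeyError:
        subset = df.loc[[]]                    # empty fallback, as in the source
    print(dict(zip(grouping_vars, key)), len(subset))
```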
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
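`comp_data`, shown above, treats a matplotlib `Axis` object as the unit converter and then applies `log10` itself whenever the axis scale is logarithmic. The two calls in isolation:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

_, ax = plt.subplots()
ax.set_xscale("log")
orig = pd.Series([1.0, 10.0, 100.0], name="x")
converter = ax.xaxis                 # the Axis doubles as a unit converter
comp = pd.to_numeric(converter.convert_units(orig))
if converter.get_scale() == "log":
    comp = np.log10(comp)
print(list(comp))  # [0.0, 1.0, 2.0]
```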
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
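`_attach` ends with two compatibility shims worth isolating: the log-base keyword changed spelling in matplotlib 3.3, and `yaxis.set_inverted` only exists from 3.1. A sketch using `packaging.version.Version` in place of seaborn's vendored `Version`:

```python
import matplotlib as mpl
import matplotlib.pyplot as plt
from packaging.version import Version

_, ax = plt.subplots()
scale = 2  # requested log base
if Version(mpl.__version__) >= Version("3.3"):
    ax.set_xscale("log", base=scale)
else:
    ax.set_xscale("log", basex=scale)      # pre-3.3 keyword spelling

try:
    ax.yaxis.set_inverted(True)            # put the first category at the top
except AttributeError:                     # mpl < 3.1
    if not ax.yaxis_inverted():
        ax.invert_yaxis()
```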
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 640, "name": "has_xy_data", "kind": "def", "category": "function", "info": " def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variables levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
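`scale_categorical`, concluded here, boils down to: optionally sort numeric data, fix the level order, then coerce both data and order to strings so matplotlib's categorical (fixed-scale) units apply. A compressed sketch, approximating `categorical_order` with sorted unique values:

```python
import pandas as pd

cat_data = pd.Series([3, 1, 2, 1], name="x")
# Sort numeric "categorical" data so stringified levels keep numeric order
cat_data = cat_data.sort_values(kind="mergesort")
order = pd.Index(sorted(cat_data.unique()))   # stand-in for categorical_order
formatter = None                              # or e.g. "{:.1f}".format
if formatter is not None:
    cat_data, order = cat_data.map(formatter), order.map(formatter)
else:
    cat_data, order = cat_data.astype(str), order.astype(str)
print(list(order))  # ['1', '2', '3']
```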
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10.
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that is specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid.
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g. comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise.
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed for completeness, but what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converters to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings.
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really means \"string\" data, so after doing this,\n # artists will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 645, "name": "var_levels", "kind": "def", "category": "function", "info": " def var_levels(self):\n \"\"\"Property interface to ordered list of variable levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key.
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coercible to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n\n"},
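
The wide-form path in _assign_variables_wideform above reduces, at its core, to DataFrame.melt: column labels become one plot variable and cell values another. A minimal standalone sketch of that reshaping, assuming a wide_structure that maps "x" to "@columns" and "y" to "@values" (the helper name melt_wide is illustrative, not part of seaborn):

import pandas as pd

def melt_wide(wide_df):
    # Mirror the melt_kws used above: column labels -> "@columns",
    # cell values -> "@values"
    long_df = wide_df.melt(var_name="@columns", value_name="@values")
    # wide_structure-style aliasing of seaborn variables onto the melted columns
    long_df["x"] = long_df["@columns"]
    long_df["y"] = long_df["@values"]
    return long_df[["x", "y"]]

wide = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
print(melt_wide(wide))
#    x  y
# 0  a  1
# 1  a  2
# 2  b  3
# 3  b  4
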
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 665, "name": "assign_variables", "kind": "def", "category": "function", "info": " def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n\n"}
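
assign_variables dispatches on whether "x" or "y" was passed: with neither, the wide-form path runs; otherwise each keyword is resolved either as a key into `data` or as a vector used directly. A minimal sketch of those long-form resolution rules under simplified assumptions (string keys only, DataFrame input; resolve_long_form is an illustrative name, not seaborn API):

import numpy as np
import pandas as pd

def resolve_long_form(data, **kwargs):
    # String values are treated as column names; anything else as data
    plot_data, variables = {}, {}
    for key, val in kwargs.items():
        if isinstance(val, str):
            if val not in data:
                raise ValueError(
                    f"Could not interpret value `{val}` for parameter `{key}`"
                )
            plot_data[key] = data[val]
            variables[key] = val
        else:
            # A vector must match the length of `data` when both are given
            if np.ndim(val) and len(val) != len(data):
                raise ValueError(f"Vector for `{key}` does not match `data` length")
            plot_data[key] = val
            variables[key] = getattr(val, "name", None)  # may be None
    return pd.DataFrame(plot_data), variables

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
plot_data, variables = resolve_long_form(df, x="a", y=np.array([7, 8, 9]))
# variables == {"x": "a", "y": None}; plot_data has columns "x" and "y"
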
variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n 
plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
                # In the more complicated case, the axes are shared within each\n                # \"file\" of the facetgrid. In that case, we need to subset the data\n                # for that file and assign it the first axis in the slice of the grid\n                else:\n\n                    names = getattr(self.facets, f\"{share_state}_names\")\n                    for i, level in enumerate(names):\n                        idx = (i, 0) if share_state == \"row\" else (0, i)\n                        axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n                        converter.loc[self.plot_data[share_state] == level] = axis\n\n            # Store the converter vector, which we use elsewhere (e.g. comp_data)\n            self.converters[var] = converter\n\n            # Now actually update the matplotlib objects to do the conversion we want\n            grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n            for converter, seed_data in grouped:\n                if self.var_types[var] == \"categorical\":\n                    if self._var_ordered[var]:\n                        order = self.var_levels[var]\n                    else:\n                        order = None\n                    seed_data = categorical_order(seed_data, order)\n                converter.update_units(seed_data)\n\n        # -- Set numerical axis scales\n\n        # First unpack the log_scale argument\n        if log_scale is None:\n            scalex = scaley = False\n        else:\n            # Allow single value or x, y tuple\n            try:\n                scalex, scaley = log_scale\n            except TypeError:\n                scalex = log_scale if \"x\" in self.variables else False\n                scaley = log_scale if \"y\" in self.variables else False\n\n        # Now use it\n        for axis, scale in zip(\"xy\", (scalex, scaley)):\n            if scale:\n                for ax in ax_list:\n                    set_scale = getattr(ax, f\"set_{axis}scale\")\n                    if scale is True:\n                        set_scale(\"log\")\n                    else:\n                        if Version(mpl.__version__) >= Version(\"3.3\"):\n                            set_scale(\"log\", base=scale)\n                        else:\n                            set_scale(\"log\", **{f\"base{axis}\": scale})\n\n        # For categorical y, we want the \"first\" level to be at the top of the axis\n        if self.var_types.get(\"y\", None) == \"categorical\":\n            for ax in ax_list:\n                try:\n                    ax.yaxis.set_inverted(True)\n                except AttributeError:  # mpl < 3.1\n                    if not ax.yaxis_inverted():\n                        ax.invert_yaxis()\n\n        # TODO -- Add axes labels\n\n    def _log_scaled(self, axis):\n        \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n        if not hasattr(self, \"ax\"):\n            return False\n\n        if self.ax is None:\n            axes_list = self.facets.axes.flatten()\n        else:\n            axes_list = [self.ax]\n\n        log_scaled = []\n        for ax in axes_list:\n            data_axis = getattr(ax, f\"{axis}axis\")\n            log_scaled.append(data_axis.get_scale() == \"log\")\n\n        if any(log_scaled) and not all(log_scaled):\n            raise RuntimeError(\"Axis scaling is not consistent\")\n\n        return any(log_scaled)\n\n    def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n        \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n        # TODO ax could default to None and use attached axes if present\n        # but what to do about the case of facets? Currently using FacetGrid's\n        # set_axis_labels method, which doesn't add labels to the interior even\n        # when the axes are not shared. Maybe that makes sense?\n        if not ax.get_xlabel():\n            x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n            ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n        if not ax.get_ylabel():\n            y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n            ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n
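
The `log_scale` handling above leans on duck typing: an `(x, y)` pair unpacks cleanly, while anything else raises `TypeError` and is applied to both axes. A self-contained sketch of that pattern (the helper name is hypothetical; assumes matplotlib >= 3.3 for the `base=` keyword, which older versions spelled `basex`/`basey`):

```python
import matplotlib.pyplot as plt

def unpack_log_scale(log_scale):
    """Split a scalar-or-pair log_scale spec into (scalex, scaley)."""
    try:
        scalex, scaley = log_scale   # an (x, y) pair unpacks cleanly
    except TypeError:
        scalex = scaley = log_scale  # a bool or number applies to both
    return scalex, scaley

fig, ax = plt.subplots()
for axis, scale in zip("xy", unpack_log_scale((2, False))):
    if scale:
        set_scale = getattr(ax, f"set_{axis}scale")
        if scale is True:
            set_scale("log")             # default base 10
        else:
            set_scale("log", base=scale)  # numeric value is the base

print(ax.get_xscale(), ax.get_yscale())  # -> log linear
```
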
    # XXX If the scale_* methods are going to modify the plot_data structure, they\n    # can't be called twice. That means that if they are called twice, they should\n    # raise. Alternatively, we could store an original version of plot_data and each\n    # time they are called they operate on the store, not the current state.\n\n    def scale_native(self, axis, *args, **kwargs):\n\n        # Default, defer to matplotlib\n\n        raise NotImplementedError\n\n    def scale_numeric(self, axis, *args, **kwargs):\n\n        # Feels needed for completeness, but what should it do?\n        # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n        raise NotImplementedError\n\n    def scale_datetime(self, axis, *args, **kwargs):\n\n        # Use pd.to_datetime to convert strings or numbers to datetime objects\n        # Note: use day-resolution for numeric->datetime to match matplotlib\n\n        raise NotImplementedError\n\n    def scale_categorical(self, axis, order=None, formatter=None):\n        \"\"\"\n        Enforce categorical (fixed-scale) rules for the data on given axis.\n\n        Parameters\n        ----------\n        axis : \"x\" or \"y\"\n            Axis of the plot to operate on.\n        order : list\n            Order that unique values should appear in.\n        formatter : callable\n            Function mapping values to a string representation.\n\n        Returns\n        -------\n        self\n\n        \"\"\"\n        # This method both modifies the internal representation of the data\n        # (converting it to string) and sets some attributes on self. It might be\n        # a good idea to have a separate object attached to self that contains the\n        # information in those attributes (i.e. whether to enforce variable order\n        # across facets, the order to use) similar to the SemanticMapping objects\n        # we have for semantic variables. That object could also hold the converter\n        # objects that get used, if we can decouple those from an existing axis\n        # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n        # There are some interactions with faceting information that would need\n        # to be thought through, since the converters to use depend on facets.\n        # If we go that route, these methods could become \"borrowed\" methods similar\n        # to what happens with the alternate semantic mapper constructors, although\n        # that approach is kind of fussy and confusing.\n\n        # TODO this method could also set the grid state? Since we like to have no\n        # grid on the categorical axis by default. Again, a case where we'll need to\n        # store information until we use it, so best to have a way to collect the\n        # attributes that this method sets.\n\n        # TODO if we are going to set visual properties of the axes with these methods,\n        # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n        # TODO another, and distinct, idea is to expose a cut= param here\n\n        _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n        # Categorical plots can be \"univariate\" in which case they get an anonymous\n        # category label on the opposite axis.\n        if axis not in self.variables:\n            self.variables[axis] = None\n            self.var_types[axis] = \"categorical\"\n            self.plot_data[axis] = \"\"\n\n        # If the \"categorical\" variable has a numeric type, sort the rows so that\n        # the default result from categorical_order has those values sorted after\n        # they have been coerced to strings. 
The reason for this is so that later\n        # we can get facet-wise orders that are correct.\n        # XXX Should this also sort datetimes?\n        # It feels more consistent, but technically it would change the default behavior\n        # If so, we should also change categorical_order to behave that way\n        if self.var_types[axis] == \"numeric\":\n            self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n        # Now get a reference to the categorical data vector\n        cat_data = self.plot_data[axis]\n\n        # Get the initial categorical order, which we do before string\n        # conversion to respect the original types of the order list.\n        # Track whether the order is given explicitly so that we can know\n        # whether or not to use the order constructed here downstream\n        self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n        order = pd.Index(categorical_order(cat_data, order))\n\n        # Then convert data to strings. This is because in matplotlib,\n        # \"categorical\" data really mean \"string\" data, so doing this means artists\n        # will be drawn on the categorical axis with a fixed scale.\n        # TODO implement formatter here; check that it returns strings?\n        if formatter is not None:\n            cat_data = cat_data.map(formatter)\n            order = order.map(formatter)\n        else:\n            cat_data = cat_data.astype(str)\n            order = order.astype(str)\n\n        # Update the levels list with the type-converted order variable\n        self.var_levels[axis] = order\n\n        # Now ensure that seaborn will use categorical rules internally\n        self.var_types[axis] = \"categorical\"\n\n        # Put the string-typed categorical vector back into the plot_data structure\n        self.plot_data[axis] = cat_data\n\n        return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 672, "name": "_assign_variables_wideform", "kind": "ref", "category": "function", "info": "            plot_data, variables = self._assign_variables_wideform(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 677, "name": "_assign_variables_longform", "kind": "ref", "category": "function", "info": "            plot_data, variables = self._assign_variables_longform(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 684, "name": "variable_type", "kind": "ref", "category": "function", "info": "            v: variable_type(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 693, "name": "_assign_variables_wideform", "kind": "def", "category": "function", "info": "    def _assign_variables_wideform(self, data=None, **kwargs):\n        \"\"\"Define plot variables given wide-form data.\n\n        Parameters\n        ----------\n        data : flat vector or collection of vectors\n            Data can be a vector or mapping that is coercible to a Series\n            or a sequence- or mapping-based collection of such vectors, or a\n            rectangular numpy array, or a Pandas DataFrame.\n        kwargs : variable -> data mappings\n            Behavior with keyword arguments is currently undefined.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        \"\"\"\n        # Raise if semantic or other variables are assigned in wide-form mode\n        assigned = [k for k, v 
in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = 
getattr(wide_data, attr[1:])\n                variables[var] = getattr(obj, \"name\", None)\n\n            # Remove redundant columns from plot_data\n            plot_data = plot_data[list(variables)]\n\n        return plot_data, variables\n\n    def _assign_variables_longform(self, data=None, **kwargs):\n        \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n        Parameters\n        ----------\n        data : dict-like collection of vectors\n            Input data where variable names map to vector values.\n        kwargs : variable -> data mappings\n            Keys are seaborn variables (x, y, hue, ...) and values are vectors\n            in any format that can construct a :class:`pandas.DataFrame` or\n            names of columns or index levels in ``data``.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        Raises\n        ------\n        ValueError\n            When variables are strings that don't appear in ``data``.\n\n        \"\"\"\n        plot_data = {}\n        variables = {}\n\n        # Data is optional; all variables can be defined as vectors\n        if data is None:\n            data = {}\n\n        # TODO should we try a data.to_dict() or similar here to more\n        # generally accept objects with that interface?\n        # Note that dict(df) also works for pandas, and gives us what we\n        # want, whereas DataFrame.to_dict() gives a nested dict instead of\n        # a dict of series.\n\n        # Variables can also be extracted from the index attribute\n        # TODO is this the most general way to enable it?\n        # There is no index.to_dict on multiindex, unfortunately\n        try:\n            index = data.index.to_frame()\n        except AttributeError:\n            index = {}\n\n        # The caller will determine the order of variables in plot_data\n        for key, val in kwargs.items():\n\n            # First try to treat the argument as a key for the data collection.\n            # But be flexible about what can be used as a key.\n            # Usually it will be a string, but allow numbers or tuples too when\n            # taking from the main data object. Only allow strings to reference\n            # fields in the index, because otherwise there is too much ambiguity.\n            try:\n                val_as_data_key = (\n                    val in data\n                    or (isinstance(val, (str, bytes)) and val in index)\n                )\n            except (KeyError, TypeError):\n                val_as_data_key = False\n\n            if val_as_data_key:\n\n                # We know that __getitem__ will work\n\n                if val in data:\n                    plot_data[key] = data[val]\n                elif val in index:\n                    plot_data[key] = index[val]\n                variables[key] = val\n\n            elif isinstance(val, (str, bytes)):\n\n                # This looks like a column name but we don't know what it means!\n\n                err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n                raise ValueError(err)\n\n            else:\n\n                # Otherwise, assume the value is itself data\n\n                # Raise when a data object is present and a vector can't be matched\n                if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n                    if np.ndim(val) and len(data) != len(val):\n                        val_cls = val.__class__.__name__\n                        err = (\n                            f\"Length of {val_cls} vectors must match length of `data`\"\n                            f\" when both are used, but `data` has length {len(data)}\"\n                            f\" and the vector passed to `{key}` has length {len(val)}.\"\n                        )\n                        raise ValueError(err)\n\n                plot_data[key] = val\n\n                # Try to infer the name of the variable\n                variables[key] = getattr(val, \"name\", None)\n\n        # Construct a tidy plot DataFrame. 
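
Condensing the long-form resolution order above into a runnable sketch (the `resolve` helper and the example frame are illustrative, not part of seaborn's API): a value is tried as a column name, then as a string-named index level, and otherwise treated as data itself.

```python
import pandas as pd

df = pd.DataFrame(
    {"total": [10, 20, 30], "day": ["Mon", "Tue", "Wed"]},
    index=pd.Index([1, 2, 3], name="visit"),
)

def resolve(data, val):
    """Interpret `val` as a column, an index level, or literal data."""
    index = data.index.to_frame()
    if val in data:                               # column name wins
        return data[val], val
    if isinstance(val, str) and val in index:     # then an index level
        return index[val], val
    if isinstance(val, str):                      # a string we can't interpret
        raise ValueError(f"Could not interpret value `{val}`")
    return pd.Series(val), getattr(val, "name", None)  # vector input

print(resolve(df, "total")[1])    # -> total (a column)
print(resolve(df, "visit")[1])    # -> visit (an index level)
print(resolve(df, [5, 6, 7])[1])  # -> None (an anonymous vector)
```
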
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
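
As a usage-style sketch of the `iter_data` pattern documented earlier in this file (toy data and levels; the real generator also handles comp_data, reversed iteration, and dropna), subsets come from grouping once and then walking the Cartesian product of known levels:

```python
import itertools
import pandas as pd

data = pd.DataFrame({
    "x": [1, 2, 3, 4],
    "hue": ["a", "a", "b", "b"],
    "col": ["u", "v", "u", "v"],
})
levels = {"hue": ["a", "b"], "col": ["u", "v"]}
grouping_vars = ["hue", "col"]

grouped = data.groupby(grouping_vars, sort=False)
for key in itertools.product(*(levels[v] for v in grouping_vars)):
    try:
        subset = grouped.get_group(key)
    except KeyError:
        subset = data.loc[[]]  # empty frame with the same columns
    sub_vars = dict(zip(grouping_vars, key))
    print(sub_vars, len(subset))
```
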
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 794, "name": "variable_type", "kind": "ref", "category": "function", "info": " k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 806, "name": "add_categories", "kind": "ref", "category": "function", "info": " wide_data.columns = wide_data.columns.add_categories(\"@index\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 811, "name": "to_series", "kind": "ref", "category": "function", "info": " wide_data[\"@index\"] = wide_data.index.to_series()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 835, "name": "_assign_variables_longform", "kind": "def", "category": "function", "info": " def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
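
The wide-form path tagged just above (`add_categories`, `to_series`) boils down to a melt that routes the index, the column names, and the cell values into plot variables. A minimal sketch, with a hypothetical `wide_structure` mapping rather than the one a real plotter would define:

```python
import pandas as pd

wide = pd.DataFrame(
    {"A": [1, 2, 3], "B": [4, 5, 6]},
    index=pd.RangeIndex(3, name="step"),
)

# Hypothetical wiring: wide_structure = {"x": "@index", "y": "@values", "hue": "@columns"}
wide["@index"] = wide.index.to_series()
long = wide.melt(id_vars="@index", var_name="@columns", value_name="@values")

plot_data = pd.DataFrame({
    "x": long["@index"], "y": long["@values"], "hue": long["@columns"],
})
print(plot_data)  # 6 rows: steps 0-2 for column A, then for column B
```
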
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 879, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = data.index.to_frame()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 949, "name": "iter_data", "kind": "def", "category": "function", "info": " def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) 
semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = 
pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1009, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1021, "name": "convert_units", "kind": "ref", "category": "function", "info": " levels[axis] = converter.convert_units(levels[axis])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1027, "name": "date2num", "kind": "ref", "category": "function", "info": " levels[axis] = mpl.dates.date2num(levels[axis])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1028, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1051, "name": "get_group", "kind": "ref", "category": "function", "info": " data_subset = grouped_data.get_group(pd_key)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1071, "name": "comp_data", "kind": "def", "category": "function", "info": " def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = 
pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1085, "name": "drop", "kind": "ref", "category": "function", "info": " .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1096, "name": "dropna", "kind": "ref", "category": "function", "info": " orig = orig.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1102, "name": "convert_units", "kind": "ref", "category": "function", "info": " comp = pd.to_numeric(converter.convert_units(orig))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1103, "name": "get_scale", "kind": "ref", "category": "function", "info": " if converter.get_scale() == \"log\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1116, "name": "_get_axes", "kind": "def", "category": "function", "info": " def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1131, "name": "_attach", "kind": "def", "category": "function", "info": " def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. 
the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis 
is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? 
Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1156, "name": "flatten", "kind": "ref", "category": "function", "info": " ax_list = obj.axes.flatten()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1211, "name": "iter_data", "kind": "ref", "category": "function", "info": " for axes_vars, axes_data in self.iter_data():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1212, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(axes_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1237, "name": "categorical_order", "kind": "ref", "category": "function", "info": " seed_data = categorical_order(seed_data, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1238, "name": "update_units", "kind": "ref", "category": "function", "info": " converter.update_units(seed_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1259, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1261, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.3\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1261, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.3\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1262, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", base=scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1264, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", **{f\"base{axis}\": scale})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1270, "name": "set_inverted", "kind": "ref", "category": "function", "info": " ax.yaxis.set_inverted(True)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1272, "name": "yaxis_inverted", "kind": "ref", "category": "function", "info": " if not ax.yaxis_inverted():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1273, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1277, "name": "_log_scaled", "kind": "def", "category": "function", "info": " def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. 
whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1283, "name": "flatten", "kind": "ref", "category": "function", "info": " axes_list = self.facets.axes.flatten()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1290, "name": "get_scale", "kind": "ref", "category": "function", "info": " log_scaled.append(data_axis.get_scale() == \"log\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1297, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? 
Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1303, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " if not ax.get_xlabel():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1304, "name": "get_visible", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1304, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1305, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1306, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " if not ax.get_ylabel():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1307, "name": "get_visible", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1307, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1308, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1315, "name": "scale_native", "kind": "def", "category": "function", "info": " def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1321, "name": "scale_numeric", "kind": "def", "category": "function", "info": " def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. 
https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1328, "name": "scale_datetime", "kind": "def", "category": "function", "info": " def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1335, "name": "scale_categorical", "kind": "def", "category": "function", "info": " def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1377, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"axis\", [\"x\", \"y\"], axis)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1394, "name": "sort_values", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1404, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = pd.Index(categorical_order(cat_data, order))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1414, "name": "astype", "kind": "ref", "category": "function", "info": " cat_data = cat_data.astype(str)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1415, "name": "astype", "kind": "ref", "category": "function", "info": " order = order.astype(str)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1429, "name": "VariableType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1449, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(vector, boolean_type=\"numeric\"):\n \"\"\"\n Determine whether a vector contains numeric, categorical, or datetime data.\n\n This function differs from the pandas typing API in two ways:\n\n - Python sequences or object-typed PyData objects are considered numeric if\n all of their entries are numeric.\n - String or mixed-type data are considered categorical even if not\n explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.\n\n Parameters\n ----------\n vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence\n Input data to test.\n boolean_type : 'numeric' or 'categorical'\n Type to use for vectors containing only 0s and 1s (and NAs).\n\n Returns\n -------\n var_type : 'numeric', 'categorical', or 'datetime'\n Name identifying the type of data in the vector.\n \"\"\"\n\n # If a categorical dtype is set, infer categorical\n if pd.api.types.is_categorical_dtype(vector):\n return VariableType(\"categorical\")\n\n # Special-case all-na data, which is always \"numeric\"\n if 
pd.isna(vector).all():\n return VariableType(\"numeric\")\n\n # Special-case binary/boolean data, allow caller to determine\n # This triggers a numpy warning when vector has strings/objects\n # https://github.com/numpy/numpy/issues/6784\n # Because we reduce with .all(), we are agnostic about whether the\n # comparison returns a scalar or vector, so we will ignore the warning.\n # It triggers a separate DeprecationWarning when the vector has datetimes:\n # https://github.com/numpy/numpy/issues/13548\n # This is considered a bug by numpy and will likely go away.\n with warnings.catch_warnings():\n warnings.simplefilter(\n action='ignore', category=(FutureWarning, DeprecationWarning)\n )\n if np.isin(vector, [0, 1, np.nan]).all():\n return VariableType(boolean_type)\n\n # Defer to positive pandas tests\n if pd.api.types.is_numeric_dtype(vector):\n return VariableType(\"numeric\")\n\n if pd.api.types.is_datetime64_dtype(vector):\n return VariableType(\"datetime\")\n\n # --- If we get to here, we need to check the entries\n\n # Check for a collection where everything is a number\n\n def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1474, "name": "is_categorical_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_categorical_dtype(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1475, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1479, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1494, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(boolean_type)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1497, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1498, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1500, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": 
"seaborn/_oldcore.py", "line": 1501, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1507, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1513, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1514, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1518, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1524, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1525, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1529, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1532, "name": "infer_orient", "kind": "def", "category": "function", "info": "def infer_orient(x=None, y=None, orient=None, require_numeric=True):\n \"\"\"Determine how the plot should be oriented based on the data.\n\n For historical reasons, the convention is to call a plot \"horizontally\"\n or \"vertically\" oriented based on the axis representing its dependent\n variable. 
Practically, this is used when determining the axis for\n numerical aggregation.\n\n Parameters\n ----------\n x, y : Vector data or None\n Positional data vectors for the plot.\n orient : string or None\n Specified orientation, which must start with \"v\" or \"h\" if not None.\n require_numeric : bool\n If set, raise when the implied dependent variable is not numeric.\n\n Returns\n -------\n orient : \"v\" or \"h\"\n\n Raises\n ------\n ValueError: When `orient` is not None and does not start with \"h\" or \"v\"\n TypeError: When dependent variable is not numeric, with `require_numeric`\n\n \"\"\"\n\n x_type = None if x is None else variable_type(x)\n y_type = None if y is None else variable_type(y)\n\n nonnumeric_dv_error = \"{} orientation requires numeric `{}` variable.\"\n single_var_warning = \"{} orientation ignored with only `{}` specified.\"\n\n if x is None:\n if str(orient).startswith(\"h\"):\n warnings.warn(single_var_warning.format(\"Horizontal\", \"y\"))\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"v\"\n\n elif y is None:\n if str(orient).startswith(\"v\"):\n warnings.warn(single_var_warning.format(\"Vertical\", \"x\"))\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"h\"\n\n elif str(orient).startswith(\"v\"):\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"v\"\n\n elif str(orient).startswith(\"h\"):\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"h\"\n\n elif orient is not None:\n err = (\n \"`orient` must start with 'v' or 'h' or be None, \"\n f\"but `{repr(orient)}` was passed.\"\n )\n raise ValueError(err)\n\n elif x_type != \"categorical\" and y_type == \"categorical\":\n return \"h\"\n\n elif x_type != \"numeric\" and y_type == \"numeric\":\n return \"v\"\n\n elif x_type == \"numeric\" and y_type != \"numeric\":\n return \"h\"\n\n elif require_numeric and \"numeric\" not in (x_type, y_type):\n err = \"Neither the `x` nor `y` variable appears to be numeric.\"\n raise TypeError(err)\n\n else:\n return \"v\"\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1560, "name": "variable_type", "kind": "ref", "category": "function", "info": " x_type = None if x is None else variable_type(x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1561, "name": "variable_type", "kind": "ref", "category": "function", "info": " y_type = None if y is None else variable_type(y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1614, "name": "unique_dashes", "kind": "def", "category": "function", "info": "def unique_dashes(n):\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes = [\n \"\",\n (4, 1.5),\n (1, 1),\n (3, 1.25, 1.5, 1.25),\n (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(\n list(a)[1:-1][::-1],\n list(b)[1:-1]\n ))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return dashes[:n]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1665, "name": "unique_markers", "kind": "def", "category": "function", "info": "def unique_markers(n):\n \"\"\"Build an arbitrarily long list of unique marker styles for points.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\",\n \"X\",\n (4, 0, 45),\n \"P\",\n (4, 0, 0),\n (4, 1, 0),\n \"^\",\n (4, 1, 45),\n \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([\n (s + 1, 1, a),\n (s + 1, 0, a),\n (s, 1, 0),\n (s, 0, 0),\n ])\n s += 1\n\n # Convert to MarkerStyle object, using only exactly what we need\n # markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]\n\n return markers[:n]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1711, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector, order=None):\n \"\"\"Return a list of unique data values.\n\n Determine an ordered list of levels in ``values``.\n\n Parameters\n ----------\n vector : list, array, Categorical, or Series\n Vector of \"categorical\" values\n order : list-like, optional\n Desired order of category levels to override the order determined\n from the ``values`` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is None:\n if hasattr(vector, \"categories\"):\n order = vector.categories\n else:\n try:\n order = vector.cat.categories\n except (TypeError, AttributeError):\n\n try:\n order = vector.unique()\n except AttributeError:\n order = pd.unique(vector)\n\n if variable_type(vector) == \"numeric\":\n order = np.sort(order)\n\n order = filter(pd.notnull, order)\n return list(order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1743, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(vector) == \"numeric\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 40, "name": "KDE", "kind": "def", "category": "class", "info": 
"__init__\t_define_support_grid\t_define_support_univariate\t_define_support_bivariate\tdefine_support\t_fit\t_eval_univariate\t_eval_bivariate\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 88, "name": "_define_support_grid", "kind": "def", "category": "function", "info": " def _define_support_grid(self, x, bw, cut, clip, gridsize):\n \"\"\"Create the grid of evaluation points depending for vector x.\"\"\"\n clip_lo = -np.inf if clip[0] is None else clip[0]\n clip_hi = +np.inf if clip[1] is None else clip[1]\n gridmin = max(x.min() - bw * cut, clip_lo)\n gridmax = min(x.max() + bw * cut, clip_hi)\n return np.linspace(gridmin, gridmax, gridsize)\n\n def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 96, "name": "_define_support_univariate", "kind": "def", "category": "function", "info": " def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 98, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 100, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid = self._define_support_grid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", 
"rel_fname": "seaborn/_statistics.py", "line": 105, "name": "_define_support_bivariate", "kind": "def", "category": "function", "info": " def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 111, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 114, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid1 = self._define_support_grid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 117, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid2 = self._define_support_grid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 123, "name": "define_support", "kind": "def", 
"category": "function", "info": " def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 126, "name": "_define_support_univariate", "kind": "ref", "category": "function", "info": " support = self._define_support_univariate(x1, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 128, "name": "_define_support_bivariate", "kind": "ref", "category": "function", "info": " support = self._define_support_bivariate(x1, x2, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 135, "name": "_fit", "kind": "def", "category": "function", "info": " def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n 
def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a bivariate KDE on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 146, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate KDE on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a bivariate KDE on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 150, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x, cache=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 152, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 160, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde(support)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 164, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a bivariate KDE on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = 
self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 168, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x1, x2, cache=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 170, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 184, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 191, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 193, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 196, "name": "Histogram", "kind": "def", "category": "class", "info": "__init__\t_define_bin_edges\tdefine_bin_params\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 240, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", stat_choices, stat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 251, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n if binrange is None:\n start, stop = x.min(), x.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n # Handle roundoff error (maybe there is a less clumsy way?)\n if bin_edges.max() < stop or len(bin_edges) < 2:\n bin_edges = np.append(bin_edges, bin_edges.max() + step)\n else:\n bin_edges = np.histogram_bin_edges(\n x, bins, binrange, weights,\n )\n return bin_edges\n\n def define_bin_params(self, x1, x2=None, 
weights=None, cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 272, "name": "define_bin_params", "kind": "def", "category": "function", "info": " def define_bin_params(self, x1, x2=None, weights=None, 
cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 276, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 321, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges.append(self._define_bin_edges(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 332, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 336, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x1, x2, cache=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 350, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 352, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 354, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / area\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": 
"seaborn/_statistics.py", "line": 364, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 368, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 376, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 378, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 380, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / np.diff(bin_edges)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 393, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 395, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 398, "name": "ECDF", "kind": "def", "category": "class", "info": "__init__\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 411, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", [\"count\", \"proportion\"], stat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 415, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, 
x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 419, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 446, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 
448, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 451, "name": "EstimateAggregator", "kind": "def", "category": "class", "info": "__init__\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 472, "name": "_validate_errorbar_arg", "kind": "ref", "category": "function", "info": " method, level = _validate_errorbar_arg(errorbar)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 481, "name": "agg", "kind": "ref", "category": "function", "info": " estimate = vals.agg(self.estimator)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 491, "name": "error_method", "kind": "ref", "category": "function", "info": " err_min, err_max = self.error_method(vals)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 498, "name": "sem", "kind": "ref", "category": "function", "info": " half_interval = vals.sem() * self.error_level\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 503, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(vals, self.error_level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 506, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 507, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(boots, self.error_level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 512, "name": "_percentile_interval", "kind": "def", "category": "function", "info": "def _percentile_interval(data, width):\n \"\"\"Return a percentile interval from data of a given width.\"\"\"\n edge = (100 - width) / 2\n percentiles = edge, 100 - edge\n return np.percentile(data, percentiles)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 519, "name": "_validate_errorbar_arg", "kind": "def", "category": "function", "info": "def _validate_errorbar_arg(arg):\n \"\"\"Check type and value of errorbar argument and assign default level.\"\"\"\n DEFAULT_LEVELS = {\n \"ci\": 95,\n \"pi\": 95,\n \"se\": 1,\n \"sd\": 1,\n }\n\n usage = \"`errorbar` must be a callable, string, or (string, number) tuple\"\n\n if arg is None:\n return None, None\n elif callable(arg):\n return arg, None\n elif isinstance(arg, str):\n method = arg\n level = DEFAULT_LEVELS.get(method, None)\n else:\n try:\n method, level = arg\n except (ValueError, TypeError) as err:\n raise err.__class__(usage) from err\n\n 
_check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n if level is not None and not isinstance(level, Number):\n raise TypeError(usage)\n\n return method, level\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 543, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 14, "name": "Agg", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 35, "name": "agg", "kind": "ref", "category": "function", "info": " .agg(data, {var: self.func})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 37, "name": "dropna", "kind": "ref", "category": "function", "info": " .dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 38, "name": "reset_index", "kind": "ref", "category": "function", "info": " .reset_index(drop=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 44, "name": "Est", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 61, "name": "Rolling", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 13, "name": "Stat", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "__future__", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "annotations", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "dataclasses", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "dataclass", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "typing", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "ClassVar", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": 
"seaborn/_stats/base.py", "line": -1, "name": "typing", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "TYPE_CHECKING", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "TYPE_CHECKING", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "pandas", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "DataFrame", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "seaborn", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "_core", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "groupby", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "GroupBy", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "seaborn", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "_core", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "scales", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "Scale", "kind": "ref", "category": 
"function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "@dataclass", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "Stat", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "group_by_orient", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "ClassVar", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "bool", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "__call__", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "self", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "data", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "DataFrame", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "groupby", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "GroupBy", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "orient", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "str", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "scales", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "dict", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "str", "kind": "ref", "category": "function", "info": "none"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "Scale", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "DataFrame", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "data", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 16, "name": "Hist", "kind": "def", "category": "class", "info": "_define_bin_edges\t_define_bin_params\t_get_bins_and_eval\t_eval\t_normalize\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 33, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n vals = vals.dropna()\n\n if binrange is None:\n start, stop = vals.min(), vals.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n else:\n bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)\n\n # TODO warning or cap on too many bins?\n\n return bin_edges\n\n def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weight, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * 
data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n scale_type = scales[orient].scale_type\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 35, "name": "dropna", "kind": "ref", "category": "function", "info": " vals = vals.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 54, "name": "_define_bin_params", "kind": "def", "category": "function", "info": " def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weight, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def 
__call__(self, data, groupby, orient, scales):\n\n scale_type = scales[orient].scale_type\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 63, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 76, "name": "_get_bins_and_eval", "kind": "def", "category": "function", "info": " def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n scale_type = scales[orient].scale_type\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = 
self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 78, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 79, "name": "apply", "kind": "ref", "category": "function", "info": " return groupby.apply(data, self._eval, orient, bin_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 81, "name": "_eval", "kind": "def", "category": "function", "info": " def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n scale_type = scales[orient].scale_type\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 97, "name": "_normalize", "kind": "def", "category": "function", "info": " def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / 
hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n scale_type = scales[orient].scale_type\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 103, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 105, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 107, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / data[\"space\"]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 115, "name": "assign", "kind": "ref", "category": "function", "info": " return data.assign(**{other: hist})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 122, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 123, "name": "apply", "kind": "ref", "category": "function", "info": " data = groupby.apply(data, self._eval, orient, bin_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 126, "name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = GroupBy(grouping_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 128, "name": "GroupBy", "kind": "ref", 
"category": "function", "info": " bin_groupby = GroupBy(self.common_bins)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 129, "name": "apply", "kind": "ref", "category": "function", "info": " data = bin_groupby.apply(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 139, "name": "_normalize", "kind": "ref", "category": "function", "info": " data = self._normalize(data, orient)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 146, "name": "GroupBy", "kind": "ref", "category": "function", "info": " data = GroupBy(norm_grouper).apply(data, normalize)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/histograms.py", "rel_fname": "seaborn/_stats/histograms.py", "line": 146, "name": "apply", "kind": "ref", "category": "function", "info": " data = GroupBy(norm_grouper).apply(data, normalize)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 10, "name": "PolyFit", "kind": "def", "category": "class", "info": "_fit_predict\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 21, "name": "_fit_predict", "kind": "def", "category": "function", "info": " def _fit_predict(self, data):\n\n x = data[\"x\"]\n y = data[\"y\"]\n if x.nunique() <= self.order:\n # TODO warn?\n xx = yy = []\n else:\n p = np.polyfit(x, y, self.order)\n xx = np.linspace(x.min(), x.max(), self.gridsize)\n yy = np.polyval(p, xx)\n\n return pd.DataFrame(dict(x=xx, y=yy))\n\n # TODO we should have a way of identifying the method that will be applied\n # and then only define __call__ on a base-class of stats with this pattern\n\n def __call__(self, data, groupby, orient, scales):\n\n return groupby.apply(data, self._fit_predict)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 25, "name": "nunique", "kind": "ref", "category": "function", "info": " if x.nunique() <= self.order:\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 40, "name": "apply", "kind": "ref", "category": "function", "info": " return groupby.apply(data, self._fit_predict)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 44, "name": "OLSFit", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 6, "name": "bootstrap", "kind": "def", "category": "function", "info": "def bootstrap(*args, **kwargs):\n \"\"\"Resample one or more arrays with replacement and store aggregate values.\n\n Positional arguments are a sequence of arrays to bootstrap along the first\n axis and pass to a summary function.\n\n Keyword arguments:\n n_boot : int, default=10000\n Number of iterations\n axis : int, default=None\n Will pass axis to ``func`` as a keyword argument.\n units : 
array, default=None\n Array of sampling unit IDs. When used the bootstrap resamples units\n and then observations within units instead of individual\n datapoints.\n func : string or callable, default=\"mean\"\n Function to call on the args that are passed in. If string, uses as\n name of function in the numpy namespace. If nans are present in the\n data, will try to use nan-aware version of named function.\n seed : Generator | SeedSequence | RandomState | int | None\n Seed for the random number generator; useful if you want\n reproducible resamples.\n\n Returns\n -------\n boot_dist: array\n array of bootstrapped statistic values\n\n \"\"\"\n # Ensure list of arrays are same length\n if len(np.unique(list(map(len, args)))) > 1:\n raise ValueError(\"All input arrays must have the same length\")\n n = len(args[0])\n\n # Default keyword arguments\n n_boot = kwargs.get(\"n_boot\", 10000)\n func = kwargs.get(\"func\", \"mean\")\n axis = kwargs.get(\"axis\", None)\n units = kwargs.get(\"units\", None)\n random_seed = kwargs.get(\"random_seed\", None)\n if random_seed is not None:\n msg = \"`random_seed` has been renamed to `seed` and will be removed\"\n warnings.warn(msg)\n seed = kwargs.get(\"seed\", random_seed)\n if axis is None:\n func_kwargs = dict()\n else:\n func_kwargs = dict(axis=axis)\n\n # Initialize the resampler\n rng = _handle_random_seed(seed)\n\n # Coerce to arrays\n args = list(map(np.asarray, args))\n if units is not None:\n units = np.asarray(units)\n\n if isinstance(func, str):\n\n # Allow named numpy functions\n f = getattr(np, func)\n\n # Try to use nan-aware version of function if necessary\n missing_data = np.isnan(np.sum(np.column_stack(args)))\n\n if missing_data and not func.startswith(\"nan\"):\n nanf = getattr(np, f\"nan{func}\", None)\n if nanf is None:\n msg = f\"Data contain nans but no nan-aware version of `{func}` found\"\n warnings.warn(msg, UserWarning)\n else:\n f = nanf\n\n else:\n f = func\n\n # Handle numpy changes\n try:\n integers = rng.integers\n except AttributeError:\n integers = rng.randint\n\n # Do the bootstrap\n if units is not None:\n return _structured_bootstrap(args, n_boot, units, f,\n func_kwargs, integers)\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n sample = [a.take(resampler, axis=0) for a in args]\n boot_dist.append(f(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 56, "name": "_handle_random_seed", "kind": "ref", "category": "function", "info": " rng = _handle_random_seed(seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 90, "name": "_structured_bootstrap", "kind": "ref", "category": "function", "info": " return _structured_bootstrap(args, n_boot, units, f,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 95, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 97, "name": "f", "kind": "ref", "category": "function", "info": " boot_dist.append(f(*sample, **func_kwargs))\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 101, "name": "_structured_bootstrap", "kind": "def", "category": "function", "info": "def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):\n \"\"\"Resample units instead of datapoints.\"\"\"\n unique_units = np.unique(units)\n n_units = len(unique_units)\n\n args = [[a[units == unit] for unit in unique_units] for a in args]\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n_units, n_units, dtype=np.intp)\n sample = [[a[i] for i in resampler] for a in args]\n lengths = map(len, sample[0])\n resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]\n sample = list(map(np.concatenate, sample))\n boot_dist.append(func(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 110, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = integers(0, n_units, n_units, dtype=np.intp)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 113, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 116, "name": "func", "kind": "ref", "category": "function", "info": " boot_dist.append(func(*sample, **func_kwargs))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 120, "name": "_handle_random_seed", "kind": "def", "category": "function", "info": "def _handle_random_seed(seed=None):\n \"\"\"Given a seed in one of many formats, return a random number generator.\n\n Generalizes across the numpy 1.17 changes, preferring newer functionality.\n\n \"\"\"\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n try:\n # General interface for seeding on numpy >= 1.17\n rng = np.random.default_rng(seed)\n except AttributeError:\n # We are on numpy < 1.17, handle options ourselves\n if isinstance(seed, (numbers.Integral, np.integer)):\n rng = np.random.RandomState(seed)\n elif seed is None:\n rng = np.random.RandomState()\n else:\n err = \"{} cannot be used to seed the randomn number generator\"\n raise ValueError(err.format(seed))\n return rng\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 131, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 135, "name": "RandomState", "kind": "ref", "category": "function", "info": " rng = np.random.RandomState(seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 137, "name": "RandomState", "kind": "ref", "category": "function", "info": " rng = np.random.RandomState()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", 
"rel_fname": "seaborn/axisgrid.py", "line": 23, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 28, "name": "_BaseGrid", "kind": "def", "category": "class", "info": "set\tfig\tfigure\tsavefig"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 39, "name": "fig", "kind": "def", "category": "function", "info": " def fig(self):\n \"\"\"DEPRECATED: prefer the `figure` property.\"\"\"\n # Grid.figure is preferred because it matches the Axes attribute name.\n # But as the maintanace burden on having this property is minimal,\n # let's be slow about formally deprecating it. For now just note its deprecation\n # in the docstring; add a warning in version 0.13, and eventually remove it.\n return self._figure\n\n @property\n def figure(self):\n \"\"\"Access the :class:`matplotlib.figure.Figure` object underlying the grid.\"\"\"\n return self._figure\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 65, "name": "Grid", "kind": "def", "category": "class", "info": "__init__\ttight_layout\tadd_legend\t_update_legend_data\t_get_palette\tlegend"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 87, "name": "add_legend", "kind": "def", "category": "function", "info": " def add_legend(self, legend_data=None, title=None, label_order=None,\n adjust_subtitles=False, **kwargs):\n \"\"\"Draw a legend, maybe placing it outside axes and resizing the figure.\n\n Parameters\n ----------\n legend_data : dict\n Dictionary mapping label names (or two-element tuples where the\n second element is a label name) to matplotlib artist handles. The\n default reads from ``self._legend_data``.\n title : string\n Title for the legend. The default reads from ``self._hue_var``.\n label_order : list of labels\n The order that the legend entries should appear in. 
The default\n reads from ``self.hue_names``.\n adjust_subtitles : bool\n If True, modify entries with invisible artists to left-align\n the labels and set the font size to that of a title.\n kwargs : key, value pairings\n Other keyword arguments are passed to the underlying legend methods\n on the Figure or Axes object.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n # Find the data for the legend\n if legend_data is None:\n legend_data = self._legend_data\n if label_order is None:\n if self.hue_names is None:\n label_order = list(legend_data.keys())\n else:\n label_order = list(map(utils.to_utf8, self.hue_names))\n\n blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n handles = [legend_data.get(l, blank_handle) for l in label_order]\n title = self._hue_var if title is None else title\n title_size = mpl.rcParams[\"legend.title_fontsize\"]\n\n # Unpack nested labels from a hierarchical legend\n labels = []\n for entry in label_order:\n if isinstance(entry, tuple):\n _, label = entry\n else:\n label = entry\n labels.append(label)\n\n # Set default legend kwargs\n kwargs.setdefault(\"scatterpoints\", 1)\n\n if self._legend_out:\n\n kwargs.setdefault(\"frameon\", False)\n kwargs.setdefault(\"loc\", \"center right\")\n\n # Draw a full-figure legend outside the grid\n figlegend = self._figure.legend(handles, labels, **kwargs)\n\n self._legend = figlegend\n figlegend.set_title(title, prop={\"size\": title_size})\n\n if adjust_subtitles:\n adjust_legend_subtitles(figlegend)\n\n # Draw the plot to set the bounding boxes correctly\n _draw_figure(self._figure)\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n fig_width, fig_height = self._figure.get_size_inches()\n self._figure.set_size_inches(fig_width + legend_width, fig_height)\n\n # Draw the plot again to get the new transformations\n _draw_figure(self._figure)\n\n # Now calculate how much space we need on the right side\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n space_needed = legend_width / (fig_width + legend_width)\n margin = .04 if self._margin_titles else .01\n self._space_needed = margin + space_needed\n right = 1 - self._space_needed\n\n # Place the subplot axes to give space for the legend\n self._figure.subplots_adjust(right=right)\n self._tight_layout_rect[2] = right\n\n else:\n # Draw a legend in the first axis\n ax = self.axes.flat[0]\n kwargs.setdefault(\"loc\", \"best\")\n\n leg = ax.legend(handles, labels, **kwargs)\n leg.set_title(title, prop={\"size\": title_size})\n self._legend = leg\n\n if adjust_subtitles:\n adjust_legend_subtitles(leg)\n\n return self\n\n def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = ax.legend_.legendHandles\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n 
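When `_legend_out` is set, `add_legend` above widens the figure by the legend's measured width and then reserves a matching right margin. The core trick is measuring the legend in inches after a draw. A minimal plain-matplotlib sketch of that resizing arithmetic, assuming the `.01` margin of the non-margin-titles case; this recreates the technique, not the `Grid` API.

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label="series")
figlegend = fig.legend(loc="center right", frameon=False)

fig.canvas.draw()  # the legend only has a window extent after a draw
legend_width = figlegend.get_window_extent().width / fig.dpi
fig_width, fig_height = fig.get_size_inches()

# Grow the figure so the axes keep their size, then reserve the right margin
fig.set_size_inches(fig_width + legend_width, fig_height)
fig.canvas.draw()
space_needed = legend_width / (fig_width + legend_width)
fig.subplots_adjust(right=1 - (space_needed + .01))
```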
else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 124, "name": "Patch", "kind": "ref", "category": "function", "info": " blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 150, "name": "set_title", "kind": "ref", "category": "function", "info": " figlegend.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 153, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(figlegend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 156, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 159, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 160, "name": "get_size_inches", "kind": "ref", "category": "function", "info": " fig_width, fig_height = self._figure.get_size_inches()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 161, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " self._figure.set_size_inches(fig_width + legend_width, fig_height)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 164, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 167, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 183, "name": "set_title", "kind": "ref", "category": "function", "info": " leg.set_title(title, 
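`_get_palette` above resolves three palette forms: `None` (current cycle, or HUSL when there are more hue levels than cycle colors), a dict mapping level names to colors, or anything `color_palette` accepts. A standalone sketch of that branching; `resolve_palette` is a hypothetical name, and `seaborn.utils.get_color_cycle` is the same internal helper the snippet uses.

```python
import seaborn as sns
from seaborn import utils

def resolve_palette(hue_names, palette=None):
    # Follows the branching shown in _get_palette above
    n_colors = len(hue_names)
    if palette is None:
        current = utils.get_color_cycle()
        colors = (sns.color_palette("husl", n_colors)
                  if n_colors > len(current)
                  else sns.color_palette(n_colors=n_colors))
    elif isinstance(palette, dict):
        # Map from hue level names to colors
        colors = sns.color_palette([palette[h] for h in hue_names], n_colors)
    else:
        # Treat as a palette name or list of colors
        colors = sns.color_palette(palette, n_colors)
    return sns.color_palette(colors, n_colors)

print(resolve_palette(["a", "b", "c"], {"a": "r", "b": "g", "c": "b"}))
```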
prop={\"size\": title_size})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 187, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(leg)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 191, "name": "_update_legend_data", "kind": "def", "category": "function", "info": " def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = ax.legend_.legendHandles\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 199, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in ax.legend_.texts]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 202, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, labels = ax.get_legend_handles_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 210, "name": "_get_palette", "kind": "def", "category": "function", "info": " def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif 
isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 213, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(n_colors=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 216, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 221, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 223, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 225, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 230, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(color_names, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 234, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 236, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(colors, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 308, "name": "FacetGrid", "kind": "def", "category": "class", "info": "__init__\tfacet_data\tmap\tmap_dataframe\t_facet_color\t_facet_plot\t_finalize_grid\tfacet_axis\tdespine\tset_axis_labels\tset_xlabels\tset_ylabels\tset_xticklabels\tset_yticklabels\tset_titles\trefline\taxes\tax\taxes_dict\t_inner_axes\t_left_axes\t_not_left_axes\t_bottom_axes\t_not_bottom_axes"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 335, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 337, "name": "_get_palette", "kind": "ref", "category": "function", "info": " 
colors = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 343, "name": "categorical_order", "kind": "ref", "category": "function", "info": " row_names = categorical_order(data[row], row_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 348, "name": "categorical_order", "kind": "ref", "category": "function", "info": " col_names = categorical_order(data[col], col_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 429, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 435, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 478, "name": "set_titles", "kind": "ref", "category": "function", "info": " self.set_titles()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 482, "name": "despine", "kind": "ref", "category": "function", "info": " self.despine()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 486, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 487, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 488, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 489, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 493, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 494, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 495, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 496, "name": "set_visible", "kind": "ref", "category": 
"function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 590, "name": "facet_data", "kind": "def", "category": "function", "info": " def facet_data(self):\n \"\"\"Generator for name indices and data subsets for each facet.\n\n Yields\n ------\n (i, j, k), data_ijk : tuple of ints, DataFrame\n The ints provide an index into the {row, col, hue}_names attribute,\n and the dataframe contains a subset of the full data corresponding\n to each facet. The generator yields subsets that correspond with\n the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`\n is None.\n\n \"\"\"\n data = self.data\n\n # Construct masks for the row variable\n if self.row_names:\n row_masks = [data[self._row_var] == n for n in self.row_names]\n else:\n row_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the column variable\n if self.col_names:\n col_masks = [data[self._col_var] == n for n in self.col_names]\n else:\n col_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the hue variable\n if self.hue_names:\n hue_masks = [data[self._hue_var] == n for n in self.hue_names]\n else:\n hue_masks = [np.repeat(True, len(self.data))]\n\n # Here is the main generator loop\n for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),\n enumerate(col_masks),\n enumerate(hue_masks)):\n data_ijk = data[row & col & hue & self._not_na]\n yield (i, j, k), data_ijk\n\n def map(self, func, *args, **kwargs):\n \"\"\"Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # How we use the function depends on where it comes from\n func_module = str(getattr(func, \"__module__\", \"\"))\n\n # Check for categorical plots without order information\n if func_module == \"seaborn.categorical\":\n if \"order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n if len(args) == 3 and \"hue_order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`hue_order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not func_module.startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n\n # Get the actual data we are going to plot with\n plot_data = data_ijk[list(args)]\n if self._dropna:\n plot_data = plot_data.dropna()\n plot_args = [v for k, v in plot_data.iteritems()]\n\n # Some matplotlib functions don't handle pandas objects correctly\n if func_module.startswith(\"matplotlib\"):\n plot_args = [v.values for v in plot_args]\n\n # Draw the plot\n self._facet_plot(func, ax, plot_args, kwargs)\n\n # Finalize the annotations and layout\n self._finalize_grid(args[:2])\n\n return self\n\n def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n\n def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n 
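As the `map_dataframe` docstring above says, the plotting function receives the facet subset as a `data` keyword and column names as strings, which `_facet_plot` then routes into seaborn's semantic parameters. A typical public-API usage, assuming `load_dataset` can fetch (or has cached) the sample data:

```python
import seaborn as sns

tips = sns.load_dataset("tips")  # needs network access or a cached copy

g = sns.FacetGrid(tips, col="time", hue="sex")
# map_dataframe injects `data=` per facet, so the function must accept
# a long-form DataFrame and string column names
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g.add_legend()
```

Note the axis-label fallback shown above: positional args win, but the `x`/`y` kwargs are used when no positional names were given.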
return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
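`set_titles` above fills `str.format` templates with the `{row_var}`/`{row_name}` and `{col_var}`/`{col_name}` keys, drawing row titles as rotated annotations on the right edge when `margin_titles` is on. A short usage sketch of those templates (a format string may use a subset of the keys; unused keys are simply ignored):

```python
import seaborn as sns

tips = sns.load_dataset("tips")  # needs network access or a cached copy

g = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
g.map_dataframe(sns.histplot, x="total_bill")

# Custom templates built from the formatting keys named in the docstring
g.set_titles(row_template="{row_name}", col_template="{col_var}: {col_name}")
```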
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
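`refline` above simply forwards to `self.map` with `plt.axvline` or `plt.axhline`, so a single call can draw vertical and horizontal reference lines on every facet. A brief usage example; the numeric values are arbitrary:

```python
import seaborn as sns

tips = sns.load_dataset("tips")  # needs network access or a cached copy

g = sns.FacetGrid(tips, col="time")
g.map_dataframe(sns.scatterplot, x="total_bill", y="tip")

# Drawn on every facet via self.map(plt.axvline, ...) / self.map(plt.axhline, ...)
g.refline(x=20, y=3, color=".3", linestyle=":")
```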
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 673, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 681, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 684, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 692, "name": "to_utf8", "kind": "ref", "category": "function", "info": " kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 697, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 698, "name": "iteritems", "kind": "ref", "category": "function", "info": " plot_args = [v for k, v in plot_data.iteritems()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 705, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, plot_args, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 708, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(args[:2])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 712, "name": "map_dataframe", "kind": "def", "category": "function", "info": " def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n\n def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n 
return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 745, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 753, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 756, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 768, "name": "dropna", "kind": "ref", "category": "function", "info": " data_ijk = data_ijk.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 772, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, args, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 779, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(axis_labels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 783, "name": "_facet_color", "kind": "def", "category": "function", "info": " def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the 
bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n"}, {"fname":
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 791, "name": "_facet_plot", "kind": "def", "category": "function", "info": " def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n 
{col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n \"\"\"\n"}, {"fname":
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 801, "name": "func", "kind": "ref", "category": "function", "info": " func(*plot_args, **plot_kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 804, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 806, "name": "_finalize_grid", "kind": "def", "category": "function", "info": " def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all 
titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n \"\"\"\n"}, {"fname":
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 808, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " self.set_axis_labels(*axlabels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 811, "name": "facet_axis", "kind": "def", "category": "function", "info": " def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {row_var} and {row_name} formatting keys.\n \"\"\"\n"}, {"fname":
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 825, "name": "despine", "kind": "def", "category": "function", "info": " def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n \"\"\"\n"}, {"fname":
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 827, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(self._figure, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 830, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n \"\"\"\n"}, {"fname":
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 834, "name": "set_xlabels", "kind": "ref", "category": "function", "info": " self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 837, "name": "set_ylabels", "kind": "ref", "category": "function", "info": " self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 841, "name": "set_xlabels", "kind": "def", "category": "function", "info": " def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 846, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 849, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(\"\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 852, "name": "set_ylabels", "kind": "def", "category": "function", "info": " def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 857, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 860, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(\"\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 863, "name": "set_xticklabels", "kind": "def", "category": "function", "info": " def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 866, "name": "get_xticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_xticks()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 867, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(curr_ticks)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 869, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 869, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 871, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = ax.get_xticks()[::step]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 873, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(xticks)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 874, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 876, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(labels, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 879, "name": "set_yticklabels", "kind": "def", "category": "function", "info": " def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 882, "name": "get_yticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_yticks()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 883, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(curr_ticks)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 885, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 885, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 886, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 888, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(labels, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 891, "name": "set_titles", "kind": "def", "category": "function", "info": " def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 930, "name": "to_utf8", "kind": "ref", "category": "function", "info": " row_template = utils.to_utf8(row_template)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "to_utf8", "kind": "ref", "category": "function", "info": " col_template = utils.to_utf8(col_template)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 932, "name": "to_utf8", "kind": "ref", "category": "function", "info": " template = utils.to_utf8(template)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 959, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[0, j].set_title(title, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 969, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, j].set_title(title, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 974, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, 0].set_title(title, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 980, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes.flat[i].set_title(title, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 983, "name": "refline", "kind": "def", "category": "function", "info": " def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1023, "name": "ax", "kind": "def", "category": "function", "info": " def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1034, "name": "axes_dict", "kind": "def", "category": "function", "info": " def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. 
If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1047, "name": "_inner_axes", "kind": "def", "category": "function", "info": " def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if 
self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1065, "name": "_left_axes", "kind": "def", "category": "function", "info": " def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1077, "name": "_not_left_axes", "kind": "def", "category": "function", "info": " def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat 
array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1089, "name": "_bottom_axes", "kind": "def", "category": "function", "info": " def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1106, "name": "_not_bottom_axes", "kind": "def", "category": "function", "info": " def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1123, "name": "PairGrid", "kind": "def", "category": "class", "info": "__init__\tmap\tmap_lower\tmap_upper\tmap_offdiag\tmap_diag\t_map_diag_iter_hue\t_map_bivariate\t_plot_bivariate\t_plot_bivariate_iter_hue\t_add_axis_labels\t_find_numeric_cols"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1203, "name": "_find_numeric_cols", "kind": "ref", "category": "function", "info": " numeric_cols = self._find_numeric_cols(data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1263, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1280, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = hue_order = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1292, "name": "_get_palette", "kind": "ref", 
"category": "function", "info": " self.palette = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1299, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1300, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1301, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1302, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1307, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1308, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1309, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1310, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1316, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(fig=fig)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1332, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1336, "name": "map_lower", "kind": "def", "category": "function", "info": " def map_lower(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the lower diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.tril_indices_from(self.axes, -1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n # This may change in future matplotlibs\n # See https://github.com/matplotlib/matplotlib/pull/9923\n group = diag_axes[0].get_shared_y_axes()\n for ax in diag_axes[1:]:\n group.join(ax, diag_axes[0])\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n 
plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. 
we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1348, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1351, "name": "map_upper", "kind": "def", "category": "function", "info": " def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n # This may change in future matplotlibs\n # See https://github.com/matplotlib/matplotlib/pull/9923\n group = diag_axes[0].get_shared_y_axes()\n for ax in diag_axes[1:]:\n group.join(ax, diag_axes[0])\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped 
= self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n 
func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1363, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1366, "name": "map_offdiag", "kind": "def", "category": "function", "info": " def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n # This may change in future matplotlibs\n # See https://github.com/matplotlib/matplotlib/pull/9923\n group = diag_axes[0].get_shared_y_axes()\n for ax in diag_axes[1:]:\n group.join(ax, diag_axes[0])\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. 
This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1378, "name": "map_lower", "kind": "ref", "category": "function", "info": " self.map_lower(func, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1380, 
"name": "map_upper", "kind": "ref", "category": "function", "info": " self.map_upper(func, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1387, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1390, "name": "map_diag", "kind": "def", "category": "function", "info": " def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n # This may change in future matplotlibs\n # See https://github.com/matplotlib/matplotlib/pull/9923\n group = diag_axes[0].get_shared_y_axes()\n for ax in diag_axes[1:]:\n group.join(ax, diag_axes[0])\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k 
= hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if 
self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1413, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " diag_ax.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1420, "name": "set_visible", "kind": "ref", "category": "function", "info": " tick.tick1line.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1424, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1426, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1433, "name": "get_shared_y_axes", "kind": "ref", "category": "function", "info": " group = diag_axes[0].get_shared_y_axes()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1441, "name": "_map_diag_iter_hue", "kind": "ref", "category": "function", "info": " return self._map_diag_iter_hue(func, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1469, "name": "func", "kind": "ref", "category": "function", "info": " func(x=vector, **plot_kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1472, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1475, "name": "_map_diag_iter_hue", "kind": "def", "category": "function", "info": " def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def 
_map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1481, "name": "groupby", "kind": 
"ref", "category": "function", "info": " hue_grouped = self.data[var].groupby(self.hue_vals)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1493, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1503, "name": "remove_na", "kind": "ref", "category": "function", "info": " data_k = utils.remove_na(data_k)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1506, "name": "func", "kind": "ref", "category": "function", "info": " func(x=data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1508, "name": "func", "kind": "ref", "category": "function", "info": " func(data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1510, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1514, "name": "_map_bivariate", "kind": "def", "category": "function", "info": " def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. 
we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1530, "name": "_plot_bivariate", "kind": "ref", "category": "function", "info": " self._plot_bivariate(x_var, y_var, ax, func, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1531, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1536, "name": "_plot_bivariate", "kind": "def", "category": "function", "info": " def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n 
\"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1539, "name": "_plot_bivariate_iter_hue", "kind": "ref", "category": "function", "info": " self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1558, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1571, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1573, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1575, "name": "_plot_bivariate_iter_hue", "kind": "def", "category": "function", "info": " def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1588, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data.groupby(self.hue_vals)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1595, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1601, "name": "dropna", "kind": "ref", "category": "function", "info": " data_k = data_k[axes_vars].dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1613, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1615, "name": "func", "kind": "ref", "category": "function", "info": " func(x, y, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1617, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1619, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self):\n \"\"\"Add labels 
to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n if self._corner:\n self.axes[0, 0].set_ylabel(\"\")\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1622, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1624, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1626, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " self.axes[0, 0].set_ylabel(\"\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1628, "name": "_find_numeric_cols", "kind": "def", "category": "function", "info": " def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1632, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1637, "name": "JointGrid", "kind": "def", "category": "class", "info": "__init__\t_inject_kwargs\tplot\tplot_joint\tplot_marginals\trefline\tset_axis_labels"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1663, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_joint = f.add_subplot(gs[1:, :-1])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1664, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1665, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1673, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1674, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(), 
visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1675, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1676, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1680, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1681, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1682, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1683, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1684, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1685, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1686, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1687, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1692, "name": "VectorPlotter", "kind": "ref", "category": "function", "info": " p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1697, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1699, "name": "get_var", "kind": "def", "category": 
"function", "info": " def get_var(var):\n vector = plot_data.get(var, None)\n if vector is not None:\n vector = vector.rename(p.variables.get(var, None))\n return vector\n\n self.x = get_var(\"x\")\n self.y = get_var(\"y\")\n self.hue = get_var(\"hue\")\n\n for axis in \"xy\":\n name = p.variables.get(axis, None)\n if name is not None:\n getattr(ax_joint, f\"set_{axis}label\")(name)\n\n if xlim is not None:\n ax_joint.set_xlim(xlim)\n if ylim is not None:\n ax_joint.set_ylim(ylim)\n\n # Store the semantic mapping parameters for axes-level functions\n self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)\n\n # Make the grid look nice\n utils.despine(f)\n if not marginal_ticks:\n utils.despine(ax=ax_marg_x, left=True)\n utils.despine(ax=ax_marg_y, bottom=True)\n for axes in [ax_marg_x, ax_marg_y]:\n for axis in [axes.xaxis, axes.yaxis]:\n axis.label.set_visible(False)\n f.tight_layout()\n f.subplots_adjust(hspace=space, wspace=space)\n\n def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1702, "name": "rename", "kind": "ref", "category": 
"function", "info": " vector = vector.rename(p.variables.get(var, None))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1705, "name": "get_var", "kind": "ref", "category": "function", "info": " self.x = get_var(\"x\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1706, "name": "get_var", "kind": "ref", "category": "function", "info": " self.y = get_var(\"y\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1707, "name": "get_var", "kind": "ref", "category": "function", "info": " self.hue = get_var(\"hue\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1715, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax_joint.set_xlim(xlim)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1717, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax_joint.set_ylim(ylim)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1723, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(f)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1725, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_x, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1726, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_y, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1729, "name": "set_visible", "kind": "ref", "category": "function", "info": " axis.label.set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1733, "name": "_inject_kwargs", "kind": "def", "category": "function", "info": " def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. 
See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1762, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " self.plot_marginals(marginal_func, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1763, "name": "plot_joint", "kind": "ref", "category": "function", "info": " self.plot_joint(joint_func, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1766, "name": "plot_joint", "kind": "def", "category": "function", "info": " def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. 
Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1793, "name": "_inject_kwargs", "kind": "ref", "category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1796, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, y=self.y, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1798, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, self.y, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1802, "name": "plot_marginals", "kind": "def", "category": "function", "info": " def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1832, "name": "_inject_kwargs", "kind": "ref", 
"category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1847, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, ax=self.ax_marg_x, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1850, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, **orient_kw_x, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1853, "name": "func", "kind": "ref", "category": "function", "info": " func(y=self.y, ax=self.ax_marg_y, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1856, "name": "func", "kind": "ref", "category": "function", "info": " func(self.y, **orient_kw_y, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1858, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1858, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1859, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1859, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1863, "name": "refline", "kind": "def", "category": "function", "info": " def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", 
ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1907, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1926, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1927, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1975, "name": "pairplot", "kind": "def", "category": "function", "info": "def pairplot(\n data, *,\n hue=None, hue_order=None, palette=None,\n vars=None, x_vars=None, y_vars=None,\n kind=\"scatter\", diag_kind=\"auto\", markers=None,\n height=2.5, aspect=1, corner=False, dropna=False,\n plot_kws=None, diag_kws=None, grid_kws=None, size=None,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2084, "name": "PairGrid", "kind": "ref", "category": "function", "info": " grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2114, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(histplot, **diag_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2118, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(kdeplot, **diag_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2128, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(scatterplot, **plot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", 
"line": 2131, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(regplot, **plot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2135, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(kdeplot, **plot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2138, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(histplot, **plot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2142, "name": "add_legend", "kind": "ref", "category": "function", "info": " grid.add_legend()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2149, "name": "jointplot", "kind": "def", "category": "function", "info": "def jointplot(\n data=None, *, x=None, y=None, hue=None, kind=\"scatter\",\n height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,\n color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,\n joint_kws=None, marginal_kws=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2191, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", plot_kinds, kind)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2204, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color_rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2205, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " colors = [utils.set_hls_values(color_rgb, l=l) # noqa\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2207, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(colors, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2214, "name": "JointGrid", "kind": "ref", "category": "function", "info": " grid = JointGrid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2228, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(scatterplot, **joint_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2238, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(marg_func, **marginal_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2246, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(histplot, **joint_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2261, "name": "histplot", "kind": 
"ref", "category": "function", "info": " histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2262, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2268, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(kdeplot, **joint_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2274, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(kdeplot, **marginal_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2278, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " x_bins = min(_freedman_diaconis_bins(grid.x), 50)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2279, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " y_bins = min(_freedman_diaconis_bins(grid.y), 50)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2284, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(plt.hexbin, **joint_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2288, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2294, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2297, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(regplot, **joint_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2302, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(residplot, **joint_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2304, "name": "get_offsets", "kind": "ref", "category": "function", "info": " x, y = grid.ax_joint.collections[0].get_offsets().T\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2306, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2307, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(y=y, hue=hue, ax=grid.ax_marg_y, 
**marginal_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 43, "name": "_CategoricalPlotterNew", "kind": "def", "category": "class", "info": "__init__\t_hue_backcompat\t_palette_without_hue_backcompat\tcat_axis\t_get_gray\t_adjust_cat_axis\t_native_width\t_nested_offsets\tplot_strips\tplot_swarms"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 77, "name": "rename", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.rename(columns={\"x\": \"y\", \"y\": \"x\"})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 95, "name": "infer_orient", "kind": "ref", "category": "function", "info": " self.orient = infer_orient(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 117, "name": "categorical_order", "kind": "ref", "category": "function", "info": " cat_levels = categorical_order(self.plot_data[self.cat_axis], order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 120, "name": "_hue_backcompat", "kind": "def", "category": "function", "info": " def _hue_backcompat(self, color, palette, hue_order, force_hue=False):\n \"\"\"Implement backwards compatibility for hue parametrization.\n\n Note: the force_hue parameter is used so that functions can be shown to\n pass existing tests during refactoring and then tested for new behavior.\n It can be removed after completion of the work.\n\n \"\"\"\n # The original categorical functions applied a palette to the categorical axis\n # by default. We want to require an explicit hue mapping, to be more consistent\n # with how things work elsewhere now. I don't think there's any good way to\n # do this gently -- because it's triggered by the default value of hue=None,\n # users would always get a warning, unless we introduce some sentinel \"default\"\n # argument for this change. 
That's possible, but asking users to set `hue=None`\n # on every call is annoying.\n # We are keeping the logic for implementing the old behavior in with the current\n # system so that (a) we can punt on that decision and (b) we can ensure that\n # refactored code passes old tests.\n default_behavior = color is None or palette is not None\n if force_hue and \"hue\" not in self.variables and default_behavior:\n self._redundant_hue = True\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables[self.cat_axis]\n self.var_types[\"hue\"] = \"categorical\"\n hue_order = self.var_levels[self.cat_axis]\n\n # Because we convert the categorical axis variable to string,\n # we need to update a dictionary palette too\n if isinstance(palette, dict):\n palette = {str(k): v for k, v in palette.items()}\n\n else:\n self._redundant_hue = False\n\n # Previously, categorical plots had a trick where color= could seed the palette.\n # Because that's an explicit parameterization, we are going to give it one\n # release cycle with a warning before removing.\n if \"hue\" in self.variables and palette is None and color is not None:\n if not isinstance(color, str):\n color = mpl.colors.to_hex(color)\n palette = f\"dark:{color}\"\n msg = (\n \"Setting a gradient palette using color= is deprecated and will be \"\n f\"removed in version 0.13. Set `palette='{palette}'` for same effect.\"\n )\n warnings.warn(msg, FutureWarning)\n\n return palette, hue_order\n\n def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = \"Passing `palette` without assigning `hue` is deprecated.\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables.get(self.cat_axis)\n self.var_types[\"hue\"] = self.var_types.get(self.cat_axis)\n hue_order = self.var_levels.get(self.cat_axis)\n return hue_order\n\n @property\n def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n 
allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 159, "name": "to_hex", "kind": "ref", "category": "function", "info": " color = mpl.colors.to_hex(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 169, "name": "_palette_without_hue_backcompat", "kind": "def", "category": "function", "info": " def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = \"Passing `palette` without assigning `hue` is deprecated.\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables.get(self.cat_axis)\n self.var_types[\"hue\"] = self.var_types.get(self.cat_axis)\n hue_order = self.var_levels.get(self.cat_axis)\n return hue_order\n\n @property\n def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n 
return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = 
ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
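The `np.power(10, sub_data[var])` lines above assume that, on a log-scaled axis, the computed coordinates are stored in log10 units, so values must be exponentiated back before plotting. A quick round-trip check of that assumption:

```python
import numpy as np

raw = np.array([1.0, 10.0, 100.0])
comp = np.log10(raw)            # what comp_data holds on a log-scaled axis
restored = np.power(10, comp)   # what gets handed to ax.scatter
assert np.allclose(restored, raw)
```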
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 182, "name": "cat_axis", "kind": "def", "category": "function", "info": " def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
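The tick-count inference described in this comment can be seen directly with matplotlib's category units. A small demo, assuming a recent matplotlib where string data trigger the category converter:

```python
import matplotlib
matplotlib.use("Agg")  # headless backend for the example
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.scatter(["a", "b", "c"], [1, 2, 3])  # first call maps three categories
ax.scatter(["c", "d"], [3, 4])          # a later call adds one new level

# The category locator places one tick per known level, so the tick count
# reflects *all* categories seen so far, including those from earlier calls.
n = len(ax.get_xticks())
assert n == 4
ax.set_xlim(-.5, n - .5)  # the limits _adjust_cat_axis would set
```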
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n 
allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 185, "name": "_get_gray", "kind": "def", "category": "function", "info": " def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. 
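The `points.draw = draw.__get__(points)` line above uses the descriptor protocol to bind a plain function to one artist instance, so only that artist's `draw()` is overridden while its class stays untouched. A minimal demo of the idiom with a stand-in patch (not seaborn's Beeswarm logic):

```python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
points = ax.scatter([0, 1], [0, 1])

def draw(self, renderer):
    # ...position adjustments would happen here at draw time...
    # The explicit two-argument super() is needed because this function is
    # defined outside any class body.
    super(self.__class__, self).draw(renderer)

points.draw = draw.__get__(points)  # bound method for this instance only
fig.canvas.draw()                   # rendering now routes through the patch
```

Deferring the work to draw time matters for swarms because point positions can only be resolved once the renderer, and hence the final point sizes in pixels, are known.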
This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = 
{}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 194, "name": "_adjust_cat_axis", "kind": "def", "category": "function", "info": " def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
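The arithmetic in `_nested_offsets` is easy to verify with concrete numbers: the hue levels split the category slot of size `width` into equal sub-slots, and subtracting the mean centers the offsets on the category position.

```python
import numpy as np

width, n_levels = 0.8, 2
each_width = width / n_levels                            # 0.4
offsets = np.linspace(0, width - each_width, n_levels)   # [0.0, 0.4]
offsets -= offsets.mean()                                # [-0.2, 0.2]
assert np.allclose(offsets, [-0.2, 0.2])

# Three levels stay centered the same way: [-0.2667, 0.0, 0.2667]
offsets3 = np.linspace(0, 0.8 - 0.8 / 3, 3)
offsets3 -= offsets3.mean()
assert np.isclose(offsets3.sum(), 0)
```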
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n 
allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 219, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(-.5, n - .5, auto=None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 223, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(n - .5, -.5, auto=None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 226, "name": "_native_width", "kind": "def", "category": "function", "info": " def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting 
methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = 
ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 235, "name": "_nested_offsets", "kind": "def", "category": "function", "info": " def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if 
show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
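The jitter setup in `plot_strips` above maps `jitter=True` to a fixed fraction of the native category width and uses `partial()` to freeze the sampling bounds, so the plotting loop only has to supply a size. A self-contained sketch of that construction (`make_jitterer` is an illustrative wrapper, not seaborn API):

```python
from functools import partial
import numpy as np

def make_jitterer(jitter, native_width=1.0, n_hue_levels=None):
    jlim = 0.1 if jitter is True else float(jitter)
    if n_hue_levels:            # dodged hue levels get proportionally less jitter
        jlim /= n_hue_levels
    jlim *= native_width
    return partial(np.random.uniform, low=-jlim, high=+jlim)

jitterer = make_jitterer(True, native_width=1.0, n_hue_levels=2)
moves = jitterer(size=5)        # five draws in [-0.05, +0.05)
assert np.all(np.abs(moves) <= 0.05)
```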
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 254, "name": "plot_strips", "kind": "def", "category": "function", "info": " def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n 
points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 264, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 282, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 288, "name": "jitterer", "kind": "ref", "category": "function", "info": " jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 294, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 297, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 301, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 304, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 315, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 316, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 320, "name": "plot_swarms", "kind": "def", "category": "function", "info": " def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 330, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 340, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 351, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 354, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 358, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 361, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 368, "name": "Beeswarm", "kind": "ref", "category": "function", "info": " beeswarm = Beeswarm(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 376, "name": "beeswarm", "kind": "ref", "category": "function", "info": " beeswarm(points, center)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 380, "name": "get_autoscaley_on", "kind": "ref", "category": "function", "info": " scaley = ax.get_autoscaley_on()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 382, "name": "get_autoscalex_on", "kind": "ref", "category": "function", "info": " scalex = ax.get_autoscalex_on()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 390, "name": "update_datalim", "kind": "ref", "category": "function", "info": " 
ax.update_datalim(points.get_datalim(ax.transData))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 392, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=scalex, scaley=scaley)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 398, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 407, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 408, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 413, "name": "_CategoricalFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 418, "name": "_CategoricalPlotter", "kind": "def", "category": "class", "info": "establish_variables\t_group_longform\testablish_colors\thue_offsets\tnested_width\tannotate_axes\tadd_legend_data"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 424, "name": "establish_variables", "kind": "def", "category": "function", "info": " def establish_variables(self, x=None, y=None, hue=None, data=None,\n orient=None, order=None, hue_order=None,\n units=None):\n \"\"\"Convert input specification into a common representation.\"\"\"\n # Option 1:\n # We are plotting a wide-form dataset\n # -----------------------------------\n if x is None and y is None:\n\n # Do a sanity check on the inputs\n if hue is not None:\n error = \"Cannot use `hue` without `x` and `y`\"\n raise ValueError(error)\n\n # No hue grouping with wide inputs\n plot_hues = None\n hue_title = None\n hue_names = None\n\n # No statistical units with wide inputs\n plot_units = None\n\n # We also won't get a axes labels here\n value_label = None\n group_label = None\n\n # Option 1a:\n # The input data is a Pandas DataFrame\n # ------------------------------------\n\n if isinstance(data, pd.DataFrame):\n\n # Order the data correctly\n if order is None:\n order = []\n # Reduce to just numeric columns\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n order.append(col)\n plot_data = data[order]\n group_names = order\n group_label = data.columns.name\n\n # Convert to a list of arrays, the common representation\n iter_data = plot_data.iteritems()\n plot_data = [np.asarray(s, float) for k, s in iter_data]\n\n # Option 1b:\n # The input data is an array or list\n # ----------------------------------\n\n else:\n\n # We can't reorder the data\n if order is not None:\n error = \"Input data must be a pandas object to reorder\"\n raise ValueError(error)\n\n # The input data is an array\n if hasattr(data, \"shape\"):\n if len(data.shape) == 1:\n if np.isscalar(data[0]):\n plot_data = [data]\n else:\n 
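# note: each element of this 1-D object array is one group's observations\n                            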
plot_data = list(data)\n elif len(data.shape) == 2:\n nr, nc = data.shape\n if nr == 1 or nc == 1:\n plot_data = [data.ravel()]\n else:\n plot_data = [data[:, i] for i in range(nc)]\n else:\n error = (\"Input `data` can have no \"\n \"more than 2 dimensions\")\n raise ValueError(error)\n\n # Check if `data` is None to let us bail out here (for testing)\n elif data is None:\n plot_data = [[]]\n\n # The input data is a flat list\n elif np.isscalar(data[0]):\n plot_data = [data]\n\n # The input data is a nested list\n # This will catch some things that might fail later\n # but exhaustive checks are hard\n else:\n plot_data = data\n\n # Convert to a list of arrays, the common representation\n plot_data = [np.asarray(d, float) for d in plot_data]\n\n # The group names will just be numeric indices\n group_names = list(range(len(plot_data)))\n\n # Figure out the plotting orientation\n orient = \"h\" if str(orient).startswith(\"h\") else \"v\"\n\n # Option 2:\n # We are plotting a long-form dataset\n # -----------------------------------\n\n else:\n\n # See if we need to get variables from `data`\n if data is not None:\n x = data.get(x, x)\n y = data.get(y, y)\n hue = data.get(hue, hue)\n units = data.get(units, units)\n\n # Validate the inputs\n for var in [x, y, hue, units]:\n if isinstance(var, str):\n err = f\"Could not interpret input '{var}'\"\n raise ValueError(err)\n\n # Figure out the plotting orientation\n orient = infer_orient(\n x, y, orient, require_numeric=self.require_numeric\n )\n\n # Option 2a:\n # We are plotting a single set of data\n # ------------------------------------\n if x is None or y is None:\n\n # Determine where the data are\n vals = y if x is None else x\n\n # Put them into the common representation\n plot_data = [np.asarray(vals)]\n\n # Get a label for the value axis\n if hasattr(vals, \"name\"):\n value_label = vals.name\n else:\n value_label = None\n\n # This plot will not have group labels or hue nesting\n groups = None\n group_label = None\n group_names = []\n plot_hues = None\n hue_names = None\n hue_title = None\n plot_units = None\n\n # Option 2b:\n # We are grouping the data values by another variable\n # ---------------------------------------------------\n else:\n\n # Determine which role each variable will play\n if orient == \"v\":\n vals, groups = y, x\n else:\n vals, groups = x, y\n\n # Get the categorical axis label\n group_label = None\n if hasattr(groups, \"name\"):\n group_label = groups.name\n\n # Get the order on the categorical axis\n group_names = categorical_order(groups, order)\n\n # Group the numeric data\n plot_data, value_label = self._group_longform(vals, groups,\n group_names)\n\n # Now handle the hue levels for nested ordering\n if hue is None:\n plot_hues = None\n hue_title = None\n hue_names = None\n else:\n\n # Get the order of the hue levels\n hue_names = categorical_order(hue, hue_order)\n\n # Group the hue data\n plot_hues, hue_title = self._group_longform(hue, groups,\n group_names)\n\n # Now handle the units for nested observations\n if units is None:\n plot_units = None\n else:\n plot_units, _ = self._group_longform(units, groups,\n group_names)\n\n # Assign object attributes\n # ------------------------\n self.orient = orient\n self.plot_data = plot_data\n self.group_label = group_label\n self.value_label = value_label\n self.group_names = group_names\n self.plot_hues = plot_hues\n self.hue_title = hue_title\n self.hue_names = hue_names\n self.plot_units = plot_units\n\n def _group_longform(self, vals, grouper, order):\n 
\"\"\"Group a long-form variable by another with correct order.\"\"\"\n # Ensure that the groupby will work\n if not isinstance(vals, pd.Series):\n if isinstance(grouper, pd.Series):\n index = grouper.index\n else:\n index = None\n vals = pd.Series(vals, index=index)\n\n # Group the val data\n grouped_vals = vals.groupby(grouper)\n out_data = []\n for g in order:\n try:\n g_vals = grouped_vals.get_group(g)\n except KeyError:\n g_vals = np.array([])\n out_data.append(g_vals)\n\n # Get the vals axis label\n label = vals.name\n\n return out_data, label\n\n def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n 
ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 461, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 468, "name": "iteritems", "kind": "ref", "category": "function", "info": " iter_data = plot_data.iteritems()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 543, "name": "infer_orient", "kind": "ref", "category": "function", "info": " orient = infer_orient(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 590, "name": "categorical_order", "kind": "ref", "category": "function", "info": " group_names = categorical_order(groups, order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 593, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_data, value_label = self._group_longform(vals, groups,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 604, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(hue, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 607, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_hues, hue_title = self._group_longform(hue, groups,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 614, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_units, _ = self._group_longform(units, groups,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 629, "name": "_group_longform", "kind": "def", "category": "function", "info": " def _group_longform(self, vals, grouper, order):\n \"\"\"Group a long-form variable by another with correct order.\"\"\"\n # Ensure that the groupby will work\n if not isinstance(vals, pd.Series):\n if isinstance(grouper, pd.Series):\n index = grouper.index\n else:\n index = None\n vals = pd.Series(vals, index=index)\n\n # Group the val data\n grouped_vals = vals.groupby(grouper)\n out_data = []\n for g in order:\n try:\n g_vals = 
grouped_vals.get_group(g)\n except KeyError:\n g_vals = np.array([])\n out_data.append(g_vals)\n\n # Get the vals axis label\n label = vals.name\n\n return out_data, label\n\n def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n 
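# note: the legend handles come from the zero-size patches added by add_legend_data\n            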
ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 640, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped_vals = vals.groupby(grouper)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 644, "name": "get_group", "kind": "ref", "category": "function", "info": " g_vals = grouped_vals.get_group(g)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 654, "name": "establish_colors", "kind": "def", "category": "function", "info": " def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def 
annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 665, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 667, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 669, "name": "husl_palette", "kind": "ref", "category": "function", "info": " colors = husl_palette(n_colors, l=.7) # noqa\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 680, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 682, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 695, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 699, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(colors, desat=saturation)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 702, "name": "color_palette", "kind": "ref", "category": "function", "info": " rgb_colors = color_palette(colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 707, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " gray = mpl.colors.rgb2hex((lum, 
lum, lum))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 714, "name": "hue_offsets", "kind": "def", "category": "function", "info": " def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 727, "name": "nested_width", "kind": "def", "category": "function", "info": " def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n 
edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 735, "name": "annotate_axes", "kind": "def", "category": "function", "info": " def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 743, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(xlabel)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 745, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(ylabel)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 752, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(len(self.plot_data)))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 753, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(group_names)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 755, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(np.arange(len(self.plot_data)))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 756, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(group_names)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 760, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 763, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(-.5, len(self.plot_data) - .5, 
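# note: auto=None leaves the axis autoscaling state unchanged\n                        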
auto=None)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 768, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 775, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(rect)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 778, "name": "_BoxPlotter", "kind": "def", "category": "class", "info": "__init__\tdraw_boxplot\trestyle_boxplot\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 784, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 785, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 795, "name": "draw_boxplot", "kind": "def", "category": "function", "info": " def draw_boxplot(self, ax, kws):\n \"\"\"Use matplotlib to draw a boxplot on an Axes.\"\"\"\n vert = self.orient == \"v\"\n\n props = {}\n for obj in [\"box\", \"whisker\", \"cap\", \"median\", \"flier\"]:\n props[obj] = kws.pop(obj + \"props\", {})\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = np.asarray(remove_na(group_data))\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n artist_dict = ax.boxplot(box_data,\n vert=vert,\n patch_artist=True,\n positions=[i],\n widths=self.width,\n **kws)\n color = self.colors[i]\n self.restyle_boxplot(artist_dict, color, props)\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = np.asarray(remove_na(group_data[hue_mask]))\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n center = i + offsets[j]\n artist_dict = ax.boxplot(box_data,\n vert=vert,\n patch_artist=True,\n positions=[center],\n widths=self.nested_width,\n **kws)\n self.restyle_boxplot(artist_dict, self.colors[j], props)\n # Add legend data, but just for one set of boxes\n\n def restyle_boxplot(self, artist_dict, color, props):\n \"\"\"Take a drawn matplotlib boxplot and make it look nice.\"\"\"\n for box in 
artist_dict[\"boxes\"]:\n box.update(dict(facecolor=color,\n zorder=.9,\n edgecolor=self.gray,\n linewidth=self.linewidth))\n box.update(props[\"box\"])\n for whisk in artist_dict[\"whiskers\"]:\n whisk.update(dict(color=self.gray,\n linewidth=self.linewidth,\n linestyle=\"-\"))\n whisk.update(props[\"whisker\"])\n for cap in artist_dict[\"caps\"]:\n cap.update(dict(color=self.gray,\n linewidth=self.linewidth))\n cap.update(props[\"cap\"])\n for med in artist_dict[\"medians\"]:\n med.update(dict(color=self.gray,\n linewidth=self.linewidth))\n med.update(props[\"median\"])\n for fly in artist_dict[\"fliers\"]:\n fly.update(dict(markerfacecolor=self.gray,\n marker=\"d\",\n markeredgecolor=self.gray,\n markersize=self.fliersize))\n fly.update(props[\"flier\"])\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_boxplot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 813, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = np.asarray(remove_na(group_data))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 826, "name": "restyle_boxplot", "kind": "ref", "category": "function", "info": " self.restyle_boxplot(artist_dict, color, props)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 834, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 841, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = np.asarray(remove_na(group_data[hue_mask]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 854, "name": "restyle_boxplot", "kind": "ref", "category": "function", "info": " self.restyle_boxplot(artist_dict, self.colors[j], props)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 857, "name": "restyle_boxplot", "kind": "def", "category": "function", "info": " def restyle_boxplot(self, artist_dict, color, props):\n \"\"\"Take a drawn matplotlib boxplot and make it look nice.\"\"\"\n for box in artist_dict[\"boxes\"]:\n box.update(dict(facecolor=color,\n zorder=.9,\n edgecolor=self.gray,\n linewidth=self.linewidth))\n box.update(props[\"box\"])\n for whisk in artist_dict[\"whiskers\"]:\n whisk.update(dict(color=self.gray,\n linewidth=self.linewidth,\n linestyle=\"-\"))\n whisk.update(props[\"whisker\"])\n for cap in artist_dict[\"caps\"]:\n cap.update(dict(color=self.gray,\n linewidth=self.linewidth))\n cap.update(props[\"cap\"])\n for med in artist_dict[\"medians\"]:\n med.update(dict(color=self.gray,\n linewidth=self.linewidth))\n med.update(props[\"median\"])\n for fly in artist_dict[\"fliers\"]:\n fly.update(dict(markerfacecolor=self.gray,\n marker=\"d\",\n markeredgecolor=self.gray,\n markersize=self.fliersize))\n fly.update(props[\"flier\"])\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_boxplot(ax, boxplot_kws)\n 
self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 887, "name": "draw_boxplot", "kind": "ref", "category": "function", "info": " self.draw_boxplot(ax, boxplot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 888, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 890, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 893, "name": "_ViolinPlotter", "kind": "def", "category": "class", "info": "__init__\testimate_densities\tfit_kde\tkde_support\tscale_area\tscale_width\tscale_count\tdwidth\tdraw_violins\tdraw_single_observation\tdraw_box_lines\tdraw_quartiles\tdraw_points\tdraw_stick_lines\tdraw_to_density\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 900, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 901, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 902, "name": "estimate_densities", "kind": "ref", "category": "function", "info": " self.estimate_densities(bw, cut, scale, scale_hue, gridsize)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 926, "name": "estimate_densities", "kind": "def", "category": "function", "info": " def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):\n \"\"\"Find the support and density for all of the data.\"\"\"\n # Initialize data structures to keep track of plotting data\n if self.hue_names is None:\n support = []\n density = []\n counts = np.zeros(len(self.plot_data))\n max_density = np.zeros(len(self.plot_data))\n else:\n support = [[] for _ in self.plot_data]\n density = [[] for _ in self.plot_data]\n size = len(self.group_names), len(self.hue_names)\n counts = np.zeros(size)\n max_density = np.zeros(size)\n\n for i, group_data in enumerate(self.plot_data):\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n # Strip missing datapoints\n kde_data = remove_na(group_data)\n\n # Handle special case of no data at this level\n if kde_data.size == 0:\n support.append(np.array([]))\n density.append(np.array([1.]))\n counts[i] = 0\n max_density[i] = 0\n continue\n\n # Handle special case of a single unique datapoint\n elif np.unique(kde_data).size == 1:\n support.append(np.unique(kde_data))\n density.append(np.array([1.]))\n counts[i] = 1\n max_density[i] = 0\n continue\n\n # Fit the KDE 
and get the used bandwidth size\n kde, bw_used = self.fit_kde(kde_data, bw)\n\n # Determine the support grid and get the density over it\n support_i = self.kde_support(kde_data, bw_used, cut, gridsize)\n density_i = kde.evaluate(support_i)\n\n # Update the data structures with these results\n support.append(support_i)\n density.append(density_i)\n counts[i] = kde_data.size\n max_density[i] = density_i.max()\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n for j, hue_level in enumerate(self.hue_names):\n\n # Handle special case of no data at this category level\n if not group_data.size:\n support[i].append(np.array([]))\n density[i].append(np.array([1.]))\n counts[i, j] = 0\n max_density[i, j] = 0\n continue\n\n # Select out the observations for this hue level\n hue_mask = self.plot_hues[i] == hue_level\n\n # Strip missing datapoints\n kde_data = remove_na(group_data[hue_mask])\n\n # Handle special case of no data at this level\n if kde_data.size == 0:\n support[i].append(np.array([]))\n density[i].append(np.array([1.]))\n counts[i, j] = 0\n max_density[i, j] = 0\n continue\n\n # Handle special case of a single unique datapoint\n elif np.unique(kde_data).size == 1:\n support[i].append(np.unique(kde_data))\n density[i].append(np.array([1.]))\n counts[i, j] = 1\n max_density[i, j] = 0\n continue\n\n # Fit the KDE and get the used bandwidth size\n kde, bw_used = self.fit_kde(kde_data, bw)\n\n # Determine the support grid and get the density over it\n support_ij = self.kde_support(kde_data, bw_used,\n cut, gridsize)\n density_ij = kde.evaluate(support_ij)\n\n # Update the data structures with these results\n support[i].append(support_ij)\n density[i].append(density_ij)\n counts[i, j] = kde_data.size\n max_density[i, j] = density_ij.max()\n\n # Scale the height of the density curve.\n # For a violinplot the density is non-quantitative.\n # The objective here is to scale the curves relative to 1 so that\n # they can be multiplied by the width parameter during plotting.\n\n if scale == \"area\":\n self.scale_area(density, max_density, scale_hue)\n\n elif scale == \"width\":\n self.scale_width(density)\n\n elif scale == \"count\":\n self.scale_count(density, counts, scale_hue)\n\n else:\n raise ValueError(f\"scale method '{scale}' not recognized\")\n\n # Set object attributes that will be used while plotting\n self.support = support\n self.density = density\n\n def fit_kde(self, x, bw):\n \"\"\"Estimate a KDE for a vector of data with flexible bandwidth.\"\"\"\n kde = gaussian_kde(x, bw)\n\n # Extract the numeric bandwidth from the KDE object\n bw_used = kde.factor\n\n # At this point, bw will be a numeric scale factor.\n # To get the actual bandwidth of the kernel, we multiple by the\n # unbiased standard deviation of the data, which we will use\n # elsewhere to compute the range of the support.\n bw_used = bw_used * x.std(ddof=1)\n\n return kde, bw_used\n\n def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in 
density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are 
drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n 
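# note: this thin line spans the whisker range from h1 to h2\n                    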
color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 949, "name": "remove_na", "kind": "ref", "category": "function", "info": " kde_data = remove_na(group_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 968, "name": "fit_kde", "kind": "ref", "category": "function", "info": " kde, bw_used = self.fit_kde(kde_data, bw)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 971, "name": "kde_support", "kind": "ref", "category": "function", "info": " support_i = self.kde_support(kde_data, bw_used, cut, gridsize)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 998, "name": "remove_na", "kind": "ref", "category": "function", "info": " kde_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 1017, "name": "fit_kde", "kind": "ref", "category": "function", "info": " kde, bw_used = self.fit_kde(kde_data, bw)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1020, "name": "kde_support", "kind": "ref", "category": "function", "info": " support_ij = self.kde_support(kde_data, bw_used,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1036, "name": "scale_area", "kind": "ref", "category": "function", "info": " self.scale_area(density, max_density, scale_hue)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1039, "name": "scale_width", "kind": "ref", "category": "function", "info": " self.scale_width(density)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1042, "name": "scale_count", "kind": "ref", "category": "function", "info": " self.scale_count(density, counts, scale_hue)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1051, "name": "fit_kde", "kind": "def", "category": "function", "info": " def fit_kde(self, x, bw):\n \"\"\"Estimate a KDE for a vector of data with flexible bandwidth.\"\"\"\n kde = gaussian_kde(x, bw)\n\n # Extract the numeric bandwidth from the KDE object\n bw_used = kde.factor\n\n # At this point, bw will be a numeric scale factor.\n # To get the actual bandwidth of the kernel, we multiple by the\n # unbiased standard deviation of the data, which we will use\n # elsewhere to compute the range of the support.\n bw_used = bw_used * x.std(ddof=1)\n\n return kde, bw_used\n\n def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not 
self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n 
violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n 
if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1066, "name": "kde_support", "kind": "def", "category": "function", "info": " def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # 
Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = 
np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def 
draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1072, "name": "scale_area", "kind": "def", "category": "function", "info": " def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n 
facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw 
stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == 
\"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1094, "name": "scale_width", "kind": "def", "category": "function", "info": " def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size 
== 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n 
color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1104, "name": "scale_count", "kind": "def", "category": "function", "info": " def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n 
return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker 
information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n 
else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1128, "name": "dwidth", "kind": "def", "category": "function", "info": " def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just 
for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + 
whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1137, "name": "draw_violins", "kind": "def", "category": "function", "info": " def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if 
support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask 
= self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if 
self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1157, "name": "item", "kind": "ref", "category": "function", "info": " val = support.item()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1158, "name": "item", "kind": "ref", "category": "function", "info": " d = density.item()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1159, "name": "draw_single_observation", "kind": "ref", "category": "function", "info": " self.draw_single_observation(ax, i, val, d)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1164, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1175, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1179, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1183, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data, support, density, i)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1187, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data, support, density, i)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1191, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1205, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1213, "name": "item", "kind": "ref", "category": "function", "info": " val = support.item()\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1214, "name": "item", "kind": "ref", "category": "function", "info": " d = density.item()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1218, "name": "draw_single_observation", "kind": "ref", "category": "function", "info": " self.draw_single_observation(ax, at_group, val, d)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1228, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1233, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1244, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1248, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1254, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1264, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1268, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1272, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1279, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1290, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1294, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i + offsets[j])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1298, "name": "draw_quartiles", "kind": "ref", "category": "function", 
"info": " self.draw_quartiles(ax, violin_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1304, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1310, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i + offsets[j])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1312, "name": "draw_single_observation", "kind": "def", "category": "function", "info": " def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw 
a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1326, "name": "draw_box_lines", "kind": "def", "category": "function", "info": " def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient 
== \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1360, "name": "draw_quartiles", "kind": "def", "category": "function", "info": " def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1364, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, q25, support, density, split,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1367, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, 
q50, support, density, split,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1370, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, q75, support, density, split,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1374, "name": "draw_points", "kind": "def", "category": "function", "info": " def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1387, "name": "draw_stick_lines", "kind": "def", "category": "function", "info": " def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1391, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, val, support, density, split,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1394, "name": "draw_to_density", "kind": "def", "category": "function", "info": " def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1418, "name": "draw_violins", "kind": "ref", "category": "function", "info": " self.draw_violins(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1419, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1421, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1424, "name": "_CategoricalStatPlotter", "kind": "def", "category": "class", "info": "nested_width\testimate_statistic\tdraw_confints"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1429, "name": "nested_width", "kind": "def", "category": "function", "info": " def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n 
ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1437, "name": "estimate_statistic", "kind": "def", "category": "function", "info": " def estimate_statistic(self, estimator, ci, n_boot, seed):\n\n if self.hue_names is None:\n statistic = []\n confint = []\n else:\n statistic = [[] for _ in self.plot_data]\n confint = [[] for _ in self.plot_data]\n\n for i, group_data in enumerate(self.plot_data):\n\n # Option 1: we have a single layer of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n if self.plot_units is None:\n stat_data = remove_na(group_data)\n unit_data = None\n else:\n unit_data = self.plot_units[i]\n have = pd.notnull(np.c_[group_data, unit_data]).all(axis=1)\n stat_data = group_data[have]\n unit_data = unit_data[have]\n\n # Estimate a statistic from the vector of data\n if not stat_data.size:\n statistic.append(np.nan)\n else:\n statistic.append(estimator(stat_data))\n\n # Get a confidence interval for this estimate\n if ci is not None:\n\n if stat_data.size < 2:\n confint.append([np.nan, np.nan])\n continue\n\n if ci == \"sd\":\n\n estimate = estimator(stat_data)\n sd = np.std(stat_data)\n confint.append((estimate - sd, estimate + sd))\n\n else:\n\n boots = bootstrap(stat_data, func=estimator,\n n_boot=n_boot,\n units=unit_data,\n seed=seed)\n confint.append(utils.ci(boots, ci))\n\n # Option 2: we are grouping by a hue layer\n # ----------------------------------------\n\n else:\n for j, hue_level in enumerate(self.hue_names):\n\n if not self.plot_hues[i].size:\n statistic[i].append(np.nan)\n if ci is not None:\n confint[i].append((np.nan, np.nan))\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n if self.plot_units is None:\n stat_data = remove_na(group_data[hue_mask])\n unit_data = None\n else:\n group_units = self.plot_units[i]\n have = pd.notnull(\n np.c_[group_data, group_units]\n ).all(axis=1)\n stat_data = group_data[hue_mask & have]\n unit_data = group_units[hue_mask & have]\n\n # Estimate a statistic from the vector of data\n if not stat_data.size:\n statistic[i].append(np.nan)\n else:\n statistic[i].append(estimator(stat_data))\n\n # Get a confidence interval for this estimate\n if ci is not None:\n\n if stat_data.size < 2:\n confint[i].append([np.nan, np.nan])\n continue\n\n if ci == \"sd\":\n\n estimate = estimator(stat_data)\n sd = np.std(stat_data)\n confint[i].append((estimate - sd, estimate + sd))\n\n else:\n\n boots = bootstrap(stat_data, func=estimator,\n n_boot=n_boot,\n units=unit_data,\n seed=seed)\n confint[i].append(utils.ci(boots, ci))\n\n # Save the resulting values for plotting\n self.statistic = np.array(statistic)\n self.confint = np.array(confint)\n\n def draw_confints(self, ax, at_group, confint, colors,\n errwidth=None, capsize=None, **kws):\n\n if errwidth is not None:\n kws.setdefault(\"lw\", errwidth)\n else:\n kws.setdefault(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n\n for at, (ci_low, 
ci_high), color in zip(at_group,\n confint,\n colors):\n if self.orient == \"v\":\n ax.plot([at, at], [ci_low, ci_high], color=color, **kws)\n if capsize is not None:\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_low, ci_low], color=color, **kws)\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_high, ci_high], color=color, **kws)\n else:\n ax.plot([ci_low, ci_high], [at, at], color=color, **kws)\n if capsize is not None:\n ax.plot([ci_low, ci_low],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n ax.plot([ci_high, ci_high],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1454, "name": "remove_na", "kind": "ref", "category": "function", "info": " stat_data = remove_na(group_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1466, "name": "estimator", "kind": "ref", "category": "function", "info": " statistic.append(estimator(stat_data))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1477, "name": "estimator", "kind": "ref", "category": "function", "info": " estimate = estimator(stat_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1483, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = bootstrap(stat_data, func=estimator,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1487, "name": "ci", "kind": "ref", "category": "function", "info": " confint.append(utils.ci(boots, ci))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1503, "name": "remove_na", "kind": "ref", "category": "function", "info": " stat_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1517, "name": "estimator", "kind": "ref", "category": "function", "info": " statistic[i].append(estimator(stat_data))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1528, "name": "estimator", "kind": "ref", "category": "function", "info": " estimate = estimator(stat_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1534, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = bootstrap(stat_data, func=estimator,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1538, "name": "ci", "kind": "ref", "category": "function", "info": " confint[i].append(utils.ci(boots, ci))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1544, "name": "draw_confints", "kind": "def", "category": "function", "info": " def draw_confints(self, ax, at_group, confint, colors,\n errwidth=None, capsize=None, **kws):\n\n if errwidth is not None:\n 
kws.setdefault(\"lw\", errwidth)\n else:\n kws.setdefault(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n\n for at, (ci_low, ci_high), color in zip(at_group,\n confint,\n colors):\n if self.orient == \"v\":\n ax.plot([at, at], [ci_low, ci_high], color=color, **kws)\n if capsize is not None:\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_low, ci_low], color=color, **kws)\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_high, ci_high], color=color, **kws)\n else:\n ax.plot([ci_low, ci_high], [at, at], color=color, **kws)\n if capsize is not None:\n ax.plot([ci_low, ci_low],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n ax.plot([ci_high, ci_high],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1573, "name": "_BarPlotter", "kind": "def", "category": "class", "info": "__init__\tdraw_bars\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1581, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1583, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1584, "name": "estimate_statistic", "kind": "ref", "category": "function", "info": " self.estimate_statistic(estimator, ci, n_boot, seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1592, "name": "draw_bars", "kind": "def", "category": "function", "info": " def draw_bars(self, ax, kws):\n \"\"\"Draw the bars onto `ax`.\"\"\"\n # Get the right matplotlib function depending on the orientation\n barfunc = ax.bar if self.orient == \"v\" else ax.barh\n barpos = np.arange(len(self.statistic))\n\n if self.plot_hues is None:\n\n # Draw the bars\n barfunc(barpos, self.statistic, self.width,\n color=self.colors, align=\"center\", **kws)\n\n # Draw the confidence intervals\n errcolors = [self.errcolor] * len(barpos)\n self.draw_confints(ax,\n barpos,\n self.confint,\n errcolors,\n self.errwidth,\n self.capsize)\n\n else:\n\n for j, hue_level in enumerate(self.hue_names):\n\n # Draw the bars\n offpos = barpos + self.hue_offsets[j]\n barfunc(offpos, self.statistic[:, j], self.nested_width,\n color=self.colors[j], align=\"center\",\n label=hue_level, **kws)\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = self.confint[:, j]\n errcolors = [self.errcolor] * len(offpos)\n self.draw_confints(ax,\n offpos,\n confint,\n errcolors,\n self.errwidth,\n self.capsize)\n\n def plot(self, ax, bar_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_bars(ax, bar_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1601, "name": "barfunc", "kind": "ref", "category": "function", "info": " barfunc(barpos, self.statistic, self.width,\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1606, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1619, "name": "barfunc", "kind": "ref", "category": "function", "info": " barfunc(offpos, self.statistic[:, j], self.nested_width,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1627, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1636, "name": "draw_bars", "kind": "ref", "category": "function", "info": " self.draw_bars(ax, bar_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1637, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1639, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1642, "name": "_PointPlotter", "kind": "def", "category": "class", "info": "__init__\thue_offsets\tdraw_points\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1652, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1654, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, 1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1655, "name": "estimate_statistic", "kind": "ref", "category": "function", "info": " self.estimate_statistic(estimator, ci, n_boot, seed)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1659, "name": "color_palette", "kind": "ref", "category": "function", "info": " self.colors = [color_palette()[0]] * len(self.colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1687, "name": "hue_offsets", "kind": "def", "category": "function", "info": " def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n 
width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1696, "name": "draw_points", "kind": "def", "category": "function", "info": " def draw_points(self, ax):\n \"\"\"Draw the main data components of the plot.\"\"\"\n # Get the center positions on the categorical axis\n pointpos = np.arange(len(self.statistic))\n\n # Get the size of the plot elements\n lw = mpl.rcParams[\"lines.linewidth\"] * 1.8 * self.scale\n mew = lw * .75\n markersize = np.pi * np.square(lw) * 2\n\n if self.plot_hues is None:\n\n # Draw lines joining each estimate point\n if self.join:\n color = self.colors[0]\n ls = self.linestyles[0]\n if self.orient == \"h\":\n ax.plot(self.statistic, pointpos,\n color=color, ls=ls, lw=lw)\n else:\n ax.plot(pointpos, self.statistic,\n color=color, ls=ls, lw=lw)\n\n # Draw the confidence intervals\n self.draw_confints(ax, pointpos, self.confint, self.colors,\n self.errwidth, self.capsize)\n\n # Draw the estimate points\n marker = self.markers[0]\n colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]\n if self.orient == \"h\":\n x, y = self.statistic, pointpos\n else:\n x, y = pointpos, self.statistic\n ax.scatter(x, y,\n linewidth=mew, marker=marker, s=markersize,\n facecolor=colors, edgecolor=colors)\n\n else:\n\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Determine the values to plot for this level\n statistic = self.statistic[:, j]\n\n # Determine the position on the categorical and z axes\n offpos = pointpos + offsets[j]\n z = j + 1\n\n # Draw lines joining each estimate point\n if self.join:\n color = self.colors[j]\n ls = self.linestyles[j]\n if self.orient == \"h\":\n ax.plot(statistic, offpos, color=color,\n zorder=z, ls=ls, lw=lw)\n else:\n ax.plot(offpos, statistic, color=color,\n zorder=z, ls=ls, lw=lw)\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = self.confint[:, j]\n errcolors = [self.colors[j]] * len(offpos)\n self.draw_confints(ax, offpos, confint, errcolors,\n self.errwidth, self.capsize,\n zorder=z)\n\n # Draw the estimate points\n n_points = len(remove_na(offpos))\n marker = self.markers[j]\n color = 
mpl.colors.colorConverter.to_rgb(self.colors[j])\n\n if self.orient == \"h\":\n x, y = statistic, offpos\n else:\n x, y = offpos, statistic\n\n if not len(remove_na(statistic)):\n x = y = [np.nan] * n_points\n\n ax.scatter(x, y, label=hue_level,\n facecolor=color, edgecolor=color,\n linewidth=mew, marker=marker, s=markersize,\n zorder=z)\n\n def plot(self, ax):\n \"\"\"Make the plot.\"\"\"\n self.draw_points(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1720, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax, pointpos, self.confint, self.colors,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1725, "name": "to_rgb", "kind": "ref", "category": "function", "info": " colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1761, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax, offpos, confint, errcolors,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1766, "name": "remove_na", "kind": "ref", "category": "function", "info": " n_points = len(remove_na(offpos))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1768, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color = mpl.colors.colorConverter.to_rgb(self.colors[j])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1775, "name": "remove_na", "kind": "ref", "category": "function", "info": " if not len(remove_na(statistic)):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1785, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1786, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1788, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1791, "name": "_CountPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1795, "name": "_LVPlotter", "kind": "def", "category": "class", "info": "__init__\t_lv_box_ends\t_lv_outliers\t_width_functions\t_lvplot\tdraw_letter_value_plot\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1835, "name": 
"establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1836, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1838, "name": "_lv_box_ends", "kind": "def", "category": "function", "info": " def _lv_box_ends(self, vals):\n \"\"\"Get the number of data points and calculate `depth` of\n letter-value plot.\"\"\"\n vals = np.asarray(vals)\n # Remove infinite values while handling a 'object' dtype\n # that can come from pd.Float64Dtype() input\n with pd.option_context('mode.use_inf_as_null', True):\n vals = vals[~pd.isnull(vals)]\n n = len(vals)\n p = self.outlier_prop\n\n # Select the depth, i.e. number of boxes to draw, based on the method\n if self.k_depth == 'full':\n # extend boxes to 100% of the data\n k = int(np.log2(n)) + 1\n elif self.k_depth == 'tukey':\n # This results with 5-8 points in each tail\n k = int(np.log2(n)) - 3\n elif self.k_depth == 'proportion':\n k = int(np.log2(n)) - int(np.log2(n * p)) + 1\n elif self.k_depth == 'trustworthy':\n point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n k = int(np.log2(n / point_conf)) + 1\n else:\n k = int(self.k_depth) # allow having k as input\n # If the number happens to be less than 1, set k to 1\n if k < 1:\n k = 1\n\n # Calculate the upper end for each of the k boxes\n upper = [100 * (1 - 0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Calculate the lower end for each of the k boxes\n lower = [100 * (0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Stitch the box ends together\n percentile_ends = [(i, j) for i, j in zip(lower, upper)]\n box_ends = [np.percentile(vals, q) for q in percentile_ends]\n return box_ends, k\n\n def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, **kws):\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n kws.update({\n 'color': self.gray, 'linestyle': '-', 'linewidth': self.linewidth\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = 
self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1859, "name": "_normal_quantile_func", "kind": "ref", "category": "function", "info": " point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1876, "name": "_lv_outliers", "kind": "def", "category": "function", "info": " def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, **kws):\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n kws.update({\n 'color': self.gray, 'linestyle': '-', 'linewidth': self.linewidth\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = 
self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1885, "name": "_width_functions", "kind": "def", "category": "function", "info": " def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. / 256., 0.],\n widths=1, ax=None, **kws):\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n kws.update({\n 'color': self.gray, 'linestyle': '-', 'linewidth': self.linewidth\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n 
ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1892, "name": "_lvplot", "kind": "def", "category": "function", "info": " def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, **kws):\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n kws.update({\n 'color': self.gray, 'linestyle': '-', 'linewidth': self.linewidth\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = 
self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1915, "name": "_lv_box_ends", "kind": "ref", "category": "function", "info": " box_ends, k = self._lv_box_ends(box_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1919, "name": "_width_functions", "kind": "ref", "category": "function", "info": " width = self._width_functions(self.scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1922, "name": "height", "kind": "def", "category": "function", "info": " def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n 
boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1926, "name": "vert_perc_box", "kind": "def", "category": "function", "info": " def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb 
= [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1929, "name": "height", "kind": "ref", "category": "function", "info": " height(b), fill=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1932, "name": "horz_perc_box", "kind": "def", "category": "function", "info": " def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n boxes = [box_func(x, b[0], i, 
k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n c=\".15\",\n alpha=0.45,\n solid_capstyle=\"butt\",\n linewidth=self.linewidth,\n **kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers, marker='d',\n c=self.gray, **kws)\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n collection = PatchCollection(\n boxes, cmap=cmap, edgecolor=self.gray, linewidth=self.linewidth\n )\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1934, "name": "height", "kind": "ref", "category": "function", "info": " height(b), widths * w,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1939, "name": "width", "kind": "ref", "category": "function", "info": " w_area = np.array([width(height(b), i, k)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1939, "name": "height", "kind": "ref", "category": "function", "info": " w_area = np.array([width(height(b), i, k)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1949, "name": "_lv_outliers", "kind": "ref", "category": "function", "info": " outliers = 
self._lv_outliers(box_data, k)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1950, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " hex_color = mpl.colors.rgb2hex(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1966, "name": "box_func", "kind": "ref", "category": "function", "info": " boxes = [box_func(x, b[0], i, k, b[1])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1987, "name": "from_list", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1989, "name": "cmap", "kind": "ref", "category": "function", "info": " rgb = [hex_color, cmap(.85)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1990, "name": "from_list", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1999, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(collection)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2001, "name": "draw_letter_value_plot", "kind": "def", "category": "function", "info": " def draw_letter_value_plot(self, ax, kws):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n **kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n **kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", 
"rel_fname": "seaborn/categorical.py", "line": 2013, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = remove_na(group_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2021, "name": "_lvplot", "kind": "ref", "category": "function", "info": " self._lvplot(box_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2035, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2042, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2050, "name": "_lvplot", "kind": "ref", "category": "function", "info": " self._lvplot(box_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2058, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2062, "name": "draw_letter_value_plot", "kind": "ref", "category": "function", "info": " self.draw_letter_value_plot(ax, boxplot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2063, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2065, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2246, "name": "_BoxPlotter", "kind": "ref", "category": "function", "info": " plotter = _BoxPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2399, "name": "_ViolinPlotter", "kind": "ref", "category": "function", "info": " plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2621, "name": "boxenplot", "kind": "def", "category": "function", "info": "def boxenplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75,\n width=.8, dodge=True, k_depth='tukey', linewidth=None,\n scale='exponential', outlier_prop=0.007, trust_alpha=0.05, showfliers=True,\n ax=None, **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2629, "name": "_LVPlotter", "kind": "ref", "category": "function", "info": " plotter 
= _LVPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2786, "name": "stripplot", "kind": "def", "category": "function", "info": "def stripplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n jitter=True, dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0, ax=None,\n hue_norm=None, native_scale=False, formatter=None, legend=\"auto\",\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2794, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2796, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2807, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2809, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2811, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2812, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2814, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2816, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2828, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2839, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2840, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2908, "name": "swarmplot", "kind": "def", "category": "function", "info": "def swarmplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0, ax=None,\n hue_norm=None, native_scale=False, formatter=None, legend=\"auto\", warn_thresh=.05,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2916, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2918, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2929, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2931, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2936, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2937, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2939, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2941, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2955, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2966, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2967, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3038, "name": "barplot", "kind": "def", "category": "function", "info": "def barplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=np.mean, ci=95, n_boot=1000, units=None, seed=None,\n orient=None, color=None, palette=None, saturation=.75,\n errcolor=\".26\", errwidth=None, capsize=None, dodge=True,\n ax=None,\n **kwargs,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3047, "name": "_BarPlotter", "kind": "ref", "category": "function", "info": " plotter = _BarPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3227, "name": "pointplot", "kind": "def", "category": "function", "info": "def pointplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=np.mean, ci=95, n_boot=1000, units=None, seed=None,\n markers=\"o\", linestyles=\"-\", dodge=False, join=True, scale=1,\n orient=None, color=None, palette=None, errwidth=None,\n capsize=None, ax=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3236, "name": "_PointPlotter", "kind": "ref", "category": "function", "info": " plotter = _PointPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3431, "name": "countplot", "kind": "def", "category": "function", "info": "def countplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75,\n dodge=True, ax=None, **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3455, "name": "_CountPlotter", "kind": "ref", "category": "function", "info": " plotter = _CountPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3565, "name": "factorplot", "kind": "def", "category": "function", "info": "def factorplot(*args, **kwargs):\n \"\"\"Deprecated; please use `catplot` instead.\"\"\"\n\n msg = (\n \"The `factorplot` function has been renamed to `catplot`. The \"\n \"original name will be removed in a future release. Please update \"\n \"your code. 
Note that the default `kind` in `factorplot` (`'point'`) \"\n \"has changed to `'strip'` in `catplot`.\"\n )\n warnings.warn(msg)\n\n if \"size\" in kwargs:\n kwargs[\"height\"] = kwargs.pop(\"size\")\n msg = (\"The `size` parameter has been renamed to `height`; \"\n \"please update your code.\")\n warnings.warn(msg, UserWarning)\n\n kwargs.setdefault(\"kind\", \"point\")\n\n return catplot(*args, **kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3584, "name": "catplot", "kind": "ref", "category": "function", "info": " return catplot(*args, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3587, "name": "catplot", "kind": "def", "category": "function", "info": "def catplot(\n data=None, *, x=None, y=None, hue=None, row=None, col=None,\n col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,\n units=None, seed=None, order=None, hue_order=None, row_order=None,\n col_order=None, kind=\"strip\", height=5, aspect=1,\n orient=None, color=None, palette=None,\n legend=\"auto\", legend_out=True, sharex=True, sharey=True,\n margin_titles=False, facet_kws=None,\n hue_norm=None, native_scale=False, formatter=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3623, "name": "_CategoricalFacetPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalFacetPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3625, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalFacetPlotter.get_semantics(locals()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3640, "name": "rename", "kind": "ref", "category": "function", "info": " data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3641, "name": "duplicated", "kind": "ref", "category": "function", "info": " data = data.loc[:, ~data.columns.duplicated()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3649, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3664, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3666, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(g)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3671, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3672, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3673, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3694, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3718, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3728, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3730, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3734, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3739, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " g._update_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3743, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3777, "name": "_CategoricalPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotter()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3779, "name": "establish_variables", "kind": "ref", "category": "function", "info": " p.establish_variables(x_, y_, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3803, "name": "establish_colors", "kind": "ref", "category": "function", "info": " p.establish_colors(color, palette, 1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3831, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(**facet_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3834, "name": "map_dataframe", "kind": "ref", 
"category": "function", "info": " g.map_dataframe(plot_func, x=x, y=y, hue=hue, **plot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3837, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.value_label, p.group_label)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3839, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.group_label, p.value_label)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3844, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(x_var=\"count\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3846, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(y_var=\"count\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3850, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4015, "name": "Beeswarm", "kind": "def", "category": "class", "info": "__init__\t__call__\tbeeswarm\tcould_overlap\tposition_candidates\tfirst_non_overlapping_candidate\tadd_gutters"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4043, "name": "transform", "kind": "ref", "category": "function", "info": " orig_xy = ax.transData.transform(orig_xy_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4050, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4053, "name": "item", "kind": "ref", "category": "function", "info": " edge = points.get_linewidth().item()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4063, "name": "beeswarm", "kind": "ref", "category": "function", "info": " new_xyr[sorter] = self.beeswarm(orig_xyr)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4070, "name": "inverted", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4070, "name": "transform", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4077, "name": "add_gutters", "kind": "ref", 
"category": "function", "info": " self.add_gutters(new_y_data, center, log_scale=log_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4079, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_x_data, center, log_scale=log_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4087, "name": "beeswarm", "kind": "def", "category": "function", "info": " def beeswarm(self, orig_xyr):\n \"\"\"Adjust x position of points to avoid overlaps.\"\"\"\n # In this method, `x` is always the categorical axis\n # Center of the swarm, in point coordinates\n midline = orig_xyr[0, 0]\n\n # Start the swarm with the first point\n swarm = np.atleast_2d(orig_xyr[0])\n\n # Loop over the remaining points\n for xyr_i in orig_xyr[1:]:\n\n # Find the points in the swarm that could possibly\n # overlap with the point we are currently placing\n neighbors = self.could_overlap(xyr_i, swarm)\n\n # Find positions that would be valid individually\n # with respect to each of the swarm neighbors\n candidates = self.position_candidates(xyr_i, neighbors)\n\n # Sort candidates by their centrality\n offsets = np.abs(candidates[:, 0] - midline)\n candidates = candidates[np.argsort(offsets)]\n\n # Find the first candidate that does not overlap any neighbors\n new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n\n # Place it into the swarm\n swarm = np.vstack([swarm, new_xyr_i])\n\n return swarm\n\n def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. 
This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4101, "name": "could_overlap", "kind": "ref", "category": "function", "info": " neighbors = self.could_overlap(xyr_i, swarm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4105, "name": "position_candidates", "kind": "ref", "category": "function", "info": " candidates = self.position_candidates(xyr_i, neighbors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4112, "name": "first_non_overlapping_candidate", "kind": "ref", "category": "function", "info": " new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4119, "name": "could_overlap", "kind": "def", "category": "function", "info": " def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of 
neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4133, "name": "position_candidates", "kind": "def", "category": "function", "info": " def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. 
This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4150, "name": "first_non_overlapping_candidate", "kind": "def", "category": "function", "info": " def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. 
This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 4183, "name": "add_gutters", "kind": "def", "category": "function", "info": " def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 83, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 85, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 86, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " dist=DocstringComponents(_dist_params),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 87, "name": "from_function_params", "kind": "ref", "category": "function", "info": " kde=DocstringComponents.from_function_params(KDE.__init__),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 88, "name": "from_function_params", "kind": "ref", "category": "function", "info": " hist=DocstringComponents.from_function_params(Histogram.__init__),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 89, "name": 
"from_function_params", "kind": "ref", "category": "function", "info": " ecdf=DocstringComponents.from_function_params(ECDF.__init__),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 98, "name": "_DistributionPlotter", "kind": "def", "category": "class", "info": "__init__\tunivariate\tdata_variable\thas_xy_data\t_add_legend\t_artist_kws\t_quantile_to_level\t_cmap_from_color\t_default_discrete\t_resolve_multiple\t_compute_univariate_density\tplot_univariate_histogram\tplot_bivariate_histogram\tplot_univariate_density\tplot_bivariate_density\tplot_univariate_ecdf\tplot_rug\t_plot_single_rug"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 114, "name": "univariate", "kind": "def", "category": "function", "info": " def univariate(self):\n \"\"\"Return True if only x or y are used.\"\"\"\n # TODO this could go down to core, but putting it here now.\n # We'd want to be conceptually clear that univariate only applies\n # to x/y and not to other semantics, which can exist.\n # We haven't settled on a good conceptual name for x/y.\n return bool({\"x\", \"y\"} - set(self.variables))\n\n @property\n def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put then in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to workaround an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 123, "name": "data_variable", "kind": "def", "category": "function", "info": " def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put then in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to workaround an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n        sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n        if multiple == \"fill\":\n            # Filled plots should not have any margins\n            sticky_support = densities.index.min(), densities.index.max()\n        else:\n            sticky_support = []\n\n        if fill:\n            if multiple == \"layer\":\n                default_alpha = .25\n            else:\n                default_alpha = .75\n        else:\n            default_alpha = 1\n        alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n        # Now iterate through the subsets and draw the densities\n        # We go backwards so stacked densities read from top-to-bottom\n        for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n            # Extract the support grid and density curve for this level\n            key = tuple(sub_vars.items())\n            try:\n                density = densities[key]\n            except KeyError:\n                continue\n            support = density.index\n            fill_from = baselines[key]\n\n            ax = self._get_axes(sub_vars)\n\n            if \"hue\" in self.variables:\n                sub_color = self._hue_map(sub_vars[\"hue\"])\n            else:\n                sub_color = color\n\n            artist_kws = self._artist_kws(\n                plot_kws, fill, False, multiple, sub_color, alpha\n            )\n\n            # Either plot a curve with observation values on the x axis\n            if \"x\" in self.variables:\n\n                if fill:\n                    artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n                else:\n                    artist, = ax.plot(support, density, **artist_kws)\n\n                artist.sticky_edges.x[:] = sticky_support\n                artist.sticky_edges.y[:] = sticky_density\n\n            # Or plot a curve with observation values on the y axis\n            else:\n                if fill:\n                    artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n                else:\n                    artist, = ax.plot(density, support, **artist_kws)\n\n                artist.sticky_edges.x[:] = sticky_density\n                artist.sticky_edges.y[:] = sticky_support\n\n        # --- Finalize the plot ----\n\n        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n        default_x = default_y = \"\"\n        if self.data_variable == \"x\":\n            default_y = \"Density\"\n        if self.data_variable == \"y\":\n            default_x = \"Density\"\n        self._add_axis_labels(ax, default_x, default_y)\n\n        if \"hue\" in self.variables and legend:\n\n            if fill:\n                artist = partial(mpl.patches.Patch)\n            else:\n                artist = partial(mpl.lines.Line2D, [], [])\n\n            ax_obj = self.ax if self.ax is not None else self.facets\n            self._add_legend(\n                ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n            )\n\n    def plot_bivariate_density(\n        self,\n        common_norm,\n        fill,\n        levels,\n        thresh,\n        color,\n        legend,\n        cbar,\n        warn_singular,\n        cbar_ax,\n        cbar_kws,\n        estimate_kws,\n        **contour_kws,\n    ):\n\n        contour_kws = contour_kws.copy()\n\n        estimator = KDE(**estimate_kws)\n\n        if not set(self.variables) - {\"x\", \"y\"}:\n            common_norm = False\n\n        all_data = self.plot_data.dropna()\n\n        # Loop through the subsets and estimate the KDEs\n        densities, supports = {}, {}\n\n        for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n            # Extract the data points from this subset and remove nulls\n            observations = sub_data[[\"x\", \"y\"]]\n\n            # Extract the weights for this subset of observations\n            if \"weights\" in self.variables:\n                weights = sub_data[\"weights\"]\n            else:\n                weights = None\n\n            # Check that KDE will not error out\n            variance = observations[[\"x\", \"y\"]].var()\n            if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n                msg = (\n                    \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n            )\n\n    def _plot_single_rug(self, sub_data, var, height, ax, kws):\n        \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n        vector = sub_data[var]\n        n = len(vector)\n\n        # Return data to linear domain\n        # This needs an automatic solution; see GH2409\n        if self._log_scaled(var):\n            vector = np.power(10, vector)\n\n        # We'll always add a single collection with varying colors\n        if \"hue\" in self.variables:\n            colors = self._hue_map(sub_data[\"hue\"])\n        else:\n            colors = None\n\n        # Build the array of values for the LineCollection\n        if var == \"x\":\n\n            trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n            xy_pairs = np.column_stack([\n                np.repeat(vector, 2), np.tile([0, height], n)\n            ])\n\n        if var == \"y\":\n\n            trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n            xy_pairs = np.column_stack([\n                np.tile([0, height], n), np.repeat(vector, 2)\n            ])\n\n        # Draw the lines on the plot\n        line_segs = xy_pairs.reshape([n, 2, 2])\n        ax.add_collection(LineCollection(\n            line_segs, transform=trans, colors=colors, **kws\n        ))\n\n        ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 131, "name": "has_xy_data", "kind": "def", "category": "function", "info": "    def has_xy_data(self):\n        \"\"\"Return True if at least one of x or y is defined.\"\"\"\n        # TODO see above points about where this should go\n        return bool({\"x\", \"y\"} & set(self.variables))\n\n    def _add_legend(\n        self,\n        ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n    ):\n        \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n        # TODO note that this doesn't handle numeric mappings like the relational plots\n        handles = []\n        labels = []\n        for level in self._hue_map.levels:\n            color = self._hue_map(level)\n\n            kws = self._artist_kws(\n                artist_kws, fill, element, multiple, color, alpha\n            )\n\n            # color gets added to the kws to work around an issue with barplot's color\n            # cycle integration but it causes problems in this context where we are\n            # setting artist properties directly, so pop it off here\n            if \"facecolor\" in kws:\n                kws.pop(\"color\", None)\n\n            handles.append(artist(**kws))\n            labels.append(level)\n\n        if isinstance(ax_obj, mpl.axes.Axes):\n            ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n        else: # i.e. a FacetGrid. 
TODO make this better\n            legend_data = dict(zip(labels, handles))\n            ax_obj.add_legend(\n                legend_data,\n                title=self.variables[\"hue\"],\n                label_order=self.var_levels[\"hue\"],\n                **legend_kws\n            )\n\n    def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n        \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n        kws = kws.copy()\n        if fill:\n            kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n            kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n            if element == \"bars\":\n                # Make bar() interface with property cycle correctly\n                # https://github.com/matplotlib/matplotlib/issues/19385\n                kws[\"color\"] = \"none\"\n\n            if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n                kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n            else:\n                kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n        elif element == \"bars\":\n            kws[\"facecolor\"] = \"none\"\n            kws[\"edgecolor\"] = to_rgba(color, alpha)\n        else:\n            kws[\"color\"] = to_rgba(color, alpha)\n        return kws\n\n    def _quantile_to_level(self, data, quantile):\n        \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n        isoprop = np.asarray(quantile)\n        values = np.ravel(data)\n        sorted_values = np.sort(values)[::-1]\n        normalized_values = np.cumsum(sorted_values) / values.sum()\n        idx = np.searchsorted(normalized_values, 1 - isoprop)\n        levels = np.take(sorted_values, idx, mode=\"clip\")\n        return levels\n\n    def _cmap_from_color(self, color):\n        \"\"\"Return a sequential colormap given a color seed.\"\"\"\n        # Like so much else here, this is broadly useful, but keeping it\n        # in this class to signify that I haven't thought overly hard about it...\n        r, g, b, _ = to_rgba(color)\n        h, s, _ = husl.rgb_to_husl(r, g, b)\n        xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n        ramp = np.zeros((256, 3))\n        ramp[:, 0] = h\n        ramp[:, 1] = s * np.cos(xx)\n        ramp[:, 2] = np.linspace(35, 80, 256)\n        colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n        return mpl.colors.ListedColormap(colors[::-1])\n\n    def _default_discrete(self):\n        \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n        if self.univariate:\n            discrete = self.var_types[self.data_variable] == \"categorical\"\n        else:\n            discrete_x = self.var_types[\"x\"] == \"categorical\"\n            discrete_y = self.var_types[\"y\"] == \"categorical\"\n            discrete = discrete_x, discrete_y\n        return discrete\n\n    def _resolve_multiple(self, curves, multiple):\n        \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n        # Default baselines have all densities starting at 0\n        baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n        # TODO we should have some central clearinghouse for checking if any\n        # \"grouping\" (terminology?) 
semantics have been assigned\n        if \"hue\" not in self.variables:\n            return curves, baselines\n\n        if multiple in (\"stack\", \"fill\"):\n\n            # Setting stack or fill means that the curves share a\n            # support grid / set of bin edges, so we can make a dataframe\n            # Reverse the column order to plot from top to bottom\n            curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n            # Find column groups that are nested within col/row variables\n            column_groups = {}\n            for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n                facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n                column_groups.setdefault(facet_key, [])\n                column_groups[facet_key].append(i)\n\n            baselines = curves.copy()\n            for cols in column_groups.values():\n\n                norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n                # Take the cumulative sum to stack\n                curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n                # Normalize by row sum to fill\n                if multiple == \"fill\":\n                    curves.iloc[:, cols] = (curves\n                                            .iloc[:, cols]\n                                            .div(norm_constant, axis=\"index\"))\n\n                # Define where each segment starts\n                baselines.iloc[:, cols] = (curves\n                                           .iloc[:, cols]\n                                           .shift(1, axis=1)\n                                           .fillna(0))\n\n        if multiple == \"dodge\":\n\n            # Account for the unique semantic (non-faceting) levels\n            # This will require rethinking if we add other semantics!\n            hue_levels = self.var_levels[\"hue\"]\n            n = len(hue_levels)\n            for key in curves:\n                level = dict(key)[\"hue\"]\n                hist = curves[key].reset_index(name=\"heights\")\n                hist[\"widths\"] /= n\n                hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n                curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n        return curves, baselines\n\n    # -------------------------------------------------------------------------------- #\n    # Computation\n    # -------------------------------------------------------------------------------- #\n\n    def _compute_univariate_density(\n        self,\n        data_variable,\n        common_norm,\n        common_grid,\n        estimate_kws,\n        log_scale,\n        warn_singular=True,\n    ):\n\n        # Initialize the estimator object\n        estimator = KDE(**estimate_kws)\n\n        if set(self.variables) - {\"x\", \"y\"}:\n            if common_grid:\n                all_observations = self.comp_data.dropna()\n                estimator.define_support(all_observations[data_variable])\n        else:\n            common_norm = False\n\n        all_data = self.plot_data.dropna()\n        if common_norm and \"weights\" in all_data:\n            whole_weight = all_data[\"weights\"].sum()\n        else:\n            whole_weight = len(all_data)\n\n        densities = {}\n\n        for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n            # Extract the data points from this subset and remove nulls\n            observations = sub_data[data_variable]\n\n            observation_variance = observations.var()\n            if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n                msg = (\n                    \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n        sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n        if multiple == \"fill\":\n            # Filled plots should not have any margins\n            sticky_support = densities.index.min(), densities.index.max()\n        else:\n            sticky_support = []\n\n        if fill:\n            if multiple == \"layer\":\n                default_alpha = .25\n            else:\n                default_alpha = .75\n        else:\n            default_alpha = 1\n        alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n        # Now iterate through the subsets and draw the densities\n        # We go backwards so stacked densities read from top-to-bottom\n        for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n            # Extract the support grid and density curve for this level\n            key = tuple(sub_vars.items())\n            try:\n                density = densities[key]\n            except KeyError:\n                continue\n            support = density.index\n            fill_from = baselines[key]\n\n            ax = self._get_axes(sub_vars)\n\n            if \"hue\" in self.variables:\n                sub_color = self._hue_map(sub_vars[\"hue\"])\n            else:\n                sub_color = color\n\n            artist_kws = self._artist_kws(\n                plot_kws, fill, False, multiple, sub_color, alpha\n            )\n\n            # Either plot a curve with observation values on the x axis\n            if \"x\" in self.variables:\n\n                if fill:\n                    artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n                else:\n                    artist, = ax.plot(support, density, **artist_kws)\n\n                artist.sticky_edges.x[:] = sticky_support\n                artist.sticky_edges.y[:] = sticky_density\n\n            # Or plot a curve with observation values on the y axis\n            else:\n                if fill:\n                    artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n                else:\n                    artist, = ax.plot(density, support, **artist_kws)\n\n                artist.sticky_edges.x[:] = sticky_density\n                artist.sticky_edges.y[:] = sticky_support\n\n        # --- Finalize the plot ----\n\n        ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n        default_x = default_y = \"\"\n        if self.data_variable == \"x\":\n            default_y = \"Density\"\n        if self.data_variable == \"y\":\n            default_x = \"Density\"\n        self._add_axis_labels(ax, default_x, default_y)\n\n        if \"hue\" in self.variables and legend:\n\n            if fill:\n                artist = partial(mpl.patches.Patch)\n            else:\n                artist = partial(mpl.lines.Line2D, [], [])\n\n            ax_obj = self.ax if self.ax is not None else self.facets\n            self._add_legend(\n                ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n            )\n\n    def plot_bivariate_density(\n        self,\n        common_norm,\n        fill,\n        levels,\n        thresh,\n        color,\n        legend,\n        cbar,\n        warn_singular,\n        cbar_ax,\n        cbar_kws,\n        estimate_kws,\n        **contour_kws,\n    ):\n\n        contour_kws = contour_kws.copy()\n\n        estimator = KDE(**estimate_kws)\n\n        if not set(self.variables) - {\"x\", \"y\"}:\n            common_norm = False\n\n        all_data = self.plot_data.dropna()\n\n        # Loop through the subsets and estimate the KDEs\n        densities, supports = {}, {}\n\n        for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n            # Extract the data points from this subset and remove nulls\n            observations = sub_data[[\"x\", \"y\"]]\n\n            # Extract the weights for this subset of observations\n            if \"weights\" in self.variables:\n                weights = sub_data[\"weights\"]\n            else:\n                weights = None\n\n            # Check that KDE will not error out\n            variance = observations[[\"x\", \"y\"]].var()\n            if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n                msg = (\n                    \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 136, "name": "_add_legend", "kind": "def", "category": "function", "info": " def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 145, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 147, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " kws = self._artist_kws(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 157, "name": "artist", "kind": "ref", "category": "function", "info": " handles.append(artist(**kws))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 164, "name": "add_legend", "kind": "ref", "category": "function", "info": " ax_obj.add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 171, "name": "_artist_kws", "kind": "def", "category": "function", "info": " def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n 
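# NOTE (editorial annotation, not part of the upstream seaborn source): after the\n # descending sort, normalized_values[i] is the share of total mass held by the\n # i + 1 largest cells, so searchsorted returns, for each quantile q, the largest\n # density level whose cells jointly hold at least 1 - q of the mass. Worked case:\n # cumulative shares [.4, .7, .9, 1.] with q = .5 give idx = 1, so the level is the\n # second-largest cell value and the cells at or above it hold 70% of the mass.\n # mode=\"clip\" keeps idx in bounds when 1 - q lands beyond the last cumulative value.\n 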
return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n 
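# NOTE (editorial annotation, not part of the upstream seaborn source): seeding the\n # estimator support from the pooled observations puts every hue level on one shared\n # evaluation grid, which is what makes the per-level curves comparable and\n # stackable downstream.\n 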
estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 175, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 194, "name": "_quantile_to_level", "kind": "def", "category": "function", "info": " def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 204, "name": "_cmap_from_color", "kind": "def", "category": "function", "info": " def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 209, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, _ = husl.rgb_to_husl(r, g, b)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 215, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 216, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(colors[::-1])\n"}
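The three `ref` records above (rgb_to_husl, husl_to_rgb, ListedColormap) are tagged inside seaborn's helper that ramps the lightness of a single seed color to build a sequential colormap. As a reading aid only, here is a rough, self-contained approximation; the function name `cmap_from_color`, the ramp endpoints, and the use of the stdlib colorsys module in place of the vendored husl implementation are all invented for illustration and do not come from the tagged source.

import colorsys
import numpy as np
import matplotlib as mpl

def cmap_from_color(rgb, n=256):
    # Convert the seed color to HLS, then ramp lightness from near-white
    # down to the seed color's own lightness, clipping to the valid RGB range
    h, l, s = colorsys.rgb_to_hls(*rgb)
    ramp = [colorsys.hls_to_rgb(h, light, s) for light in np.linspace(0.95, l, n)]
    return mpl.colors.ListedColormap(np.clip(ramp, 0, 1))

cmap = cmap_from_color((0.2, 0.4, 0.8))  # a light-to-dark blue colormap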
, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 218, "name": "_default_discrete", "kind": "def", "category": "function", "info": " def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n"}
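The record that follows tags `_resolve_multiple`, whose stack/fill branch turns a dict of curves sharing one support grid into stacked (cumulative) curves plus the baselines each curve starts from. Below is a minimal sketch of that bookkeeping with invented data, as a reading aid only; the real method also reverses the column order and groups columns by facet, which this omits.

import pandas as pd

# Two hypothetical density curves evaluated on a shared support grid
curves = pd.DataFrame({"a": [1.0, 2.0, 1.0], "b": [0.5, 1.5, 2.5]})

# Total height at each grid point, used to normalize when multiple="fill"
norm_constant = curves.sum(axis="columns")

# Cumulative sum across columns stacks the curves on top of one another
stacked = curves.cumsum(axis="columns")

# Dividing by the per-point total rescales the stack to sum to 1 ("fill")
filled = stacked.div(norm_constant, axis="index")

# Each curve starts where the previous one ended; the first starts at 0
baselines = stacked.shift(1, axis=1).fillna(0)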
, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 228, "name": "_resolve_multiple", "kind": "def", "category": "function", "info": " def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n hist[\"widths\"] /= n\n hist[\"edges\"] += hue_levels.index(level) * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # 
-------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 248, "name": "tolist", "kind": "ref", "category": "function", "info": " for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 265, "name": "div", "kind": "ref", "category": "function", "info": " .div(norm_constant, axis=\"index\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 270, "name": "shift", "kind": "ref", "category": "function", "info": " .shift(1, axis=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 271, "name": "fillna", "kind": "ref", "category": "function", "info": " .fillna(0))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 281, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = curves[key].reset_index(name=\"heights\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 285, "name": "set_index", "kind": "ref", "category": "function", "info": " curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 293, "name": "_compute_univariate_density", "kind": "def", "category": "function", "info": " def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, 
sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n observation_variance = observations.var()\n if math.isclose(observation_variance, 0) or np.isnan(observation_variance):\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n density, support = estimator(observations, weights=weights)\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 304, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 308, "name": "dropna", "kind": "ref", "category": "function", "info": " all_observations = self.comp_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 309, "name": "define_support", "kind": "ref", "category": "function", "info": " estimator.define_support(all_observations[data_variable])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 313, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 321, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 345, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(observations, weights=weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 364, "name": "plot_univariate_histogram", "kind": "def", "category": "function", "info": " def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", 
\"step\", \"poly\"], element)\n\n if estimate_kws[\"discrete\"] and element != \"bars\":\n raise ValueError(\"`element` must be 'bars' when `discrete` is True\")\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = 
histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now 
we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n 
else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 387, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 388, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 412, "name": "Histogram", "kind": "ref", "category": "function", "info": " estimator = Histogram(**estimate_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 416, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 421, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " estimator.define_bin_params(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 437, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 438, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 448, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 462, "name": "estimator", "kind": "ref", 
"category": "function", "info": " heights, edges = estimator(observations, weights=weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 474, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 481, "name": "from_arrays", "kind": "ref", "category": "function", "info": " index = pd.MultiIndex.from_arrays([\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 495, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " histograms, baselines = self._resolve_multiple(histograms, multiple)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 497, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, _ = self._resolve_multiple(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 505, "name": "to_frame", "kind": "ref", "category": "function", "info": " bin_vals = histograms.index.to_frame()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 510, "name": "idxmax", "kind": "ref", "category": "function", "info": " edges.max() + widths.loc[edges.idxmax()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 535, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 538, "name": "rename", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 538, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 541, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 545, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 549, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 558, "name": "plot_func", "kind": "ref", "category": "function", "info": " 
artists = plot_func(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 654, "name": "to_frame", "kind": "ref", "category": "function", "info": " h.index.to_frame() for _, h in histograms.items()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 655, "name": "reset_index", "kind": "ref", "category": "function", "info": " ]).reset_index(drop=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 656, "name": "idxmin", "kind": "ref", "category": "function", "info": " thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 664, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 666, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 670, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 674, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([left_edge + binwidth] * 2)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 675, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([left_edge] * 2)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 712, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 723, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 727, "name": "plot_bivariate_histogram", "kind": "def", "category": "function", "info": " def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and 
norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 741, "name": "Histogram", "kind": "ref", "category": "function", "info": " estimator = Histogram(**estimate_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 745, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 747, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " estimator.define_bin_params(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 758, "name": "iter_data", "kind": "ref", "category": "function", "info": " for _, sub_data in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 759, "name": "estimator", "kind": "ref", "category": "function", "info": " sub_heights, _ = estimator(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 767, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(full_heights, pthresh)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 772, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(full_heights, pmax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 784, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 790, "name": "estimator", "kind": "ref", "category": "function", "info": " heights, (x_edges, y_edges) = 
estimator(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 797, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 799, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 809, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 810, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 815, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 817, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 822, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(heights, pmax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 828, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(heights, pthresh)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 830, "name": "masked_less_equal", "kind": "ref", "category": "function", "info": " heights = np.ma.masked_less_equal(heights, thresh)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 833, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 837, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 838, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 869, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 880, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 884, "name": "plot_univariate_density", "kind": "def", "category": "function", "info": " def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if 
self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n 
contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = 
self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 906, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " plot_kws = _normalize_kwargs(plot_kws, artist)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 909, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 917, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 920, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 930, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, baselines = self._resolve_multiple(densities, multiple)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 953, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 964, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 967, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 971, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1005, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1015, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1019, "name": "plot_bivariate_density", "kind": "def", "category": "function", "info": " def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[[\"x\", \"y\"]]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Check that KDE will not error out\n variance = observations[[\"x\", \"y\"]].var()\n if any(math.isclose(x, 0) for x in variance) or variance.isna().any():\n msg = (\n \"Dataset has 0 variance; skipping density estimate. 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning)\n continue\n\n # Estimate the density of observations at this level\n observations = observations[\"x\"], observations[\"y\"]\n density, support = estimator(*observations, weights=weights)\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1037, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1042, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1047, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1071, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(*observations, weights=weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1075, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1077, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1100, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " common_levels = self._quantile_to_level(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1106, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " k: self._quantile_to_level(d, levels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1130, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(default_color)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1138, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1143, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1146, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1148, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " contour_kws[\"cmap\"] = self._cmap_from_color(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1152, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1170, "name": "contour_func", "kind": "ref", "category": "function", "info": " cset = contour_func(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1188, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1203, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1207, "name": "plot_univariate_ecdf", "kind": "def", "category": "function", "info": " def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = 
len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1209, "name": "ECDF", "kind": "ref", "category": "function", "info": " estimator = ECDF(**estimate_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1216, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", 
"line": 1226, "name": "estimator", "kind": "ref", "category": "function", "info": " stat, vals = estimator(observations, weights=weights)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1231, "name": "_hue_map", "kind": "ref", "category": "function", "info": " artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1235, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1253, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1266, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1272, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1276, "name": "plot_rug", "kind": "def", "category": "function", "info": " def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = 
xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1278, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1280, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1297, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1299, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1302, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1306, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1310, "name": "_plot_single_rug", "kind": "def", "category": "function", "info": " def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1317, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1322, "name": 
"_hue_map", "kind": "ref", "category": "function", "info": " colors = self._hue_map(sub_data[\"hue\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1343, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(LineCollection(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1347, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1350, "name": "_DistributionFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1359, "name": "histplot", "kind": "def", "category": "function", "info": "def histplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Histogram computation parameters\n stat=\"count\", bins=\"auto\", binwidth=None, binrange=None,\n discrete=None, cumulative=False, common_bins=True, common_norm=True,\n # Histogram appearance parameters\n multiple=\"layer\", element=\"bars\", fill=True, shrink=1,\n # Histogram smoothing with a kernel density estimate\n kde=False, kde_kws=None, line_kws=None,\n # Bivariate histogram parameters\n thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1380, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1382, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1385, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1390, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1397, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1404, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " discrete = p._default_discrete()\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1417, "name": "plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1435, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1582, "name": "kdeplot", "kind": "def", "category": "function", "info": "def kdeplot(\n data=None, *, x=None, y=None, hue=None, weights=None,\n palette=None, hue_order=None, hue_norm=None, color=None, fill=None,\n multiple=\"layer\", common_norm=True, common_grid=False, cumulative=False,\n bw_method=\"scott\", bw_adjust=1, warn_singular=True, log_scale=None,\n levels=10, thresh=.05, gridsize=200, cut=3, clip=None,\n legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,\n **kwargs,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1670, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1672, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1675, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1680, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, allowed_types=[\"numeric\", \"datetime\"], log_scale=log_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1683, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1702, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1716, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1862, "name": "ecdfplot", "kind": "def", "category": "function", "info": "def ecdfplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Computation parameters\n stat=\"proportion\", complementary=False,\n # Hue mapping parameters\n palette=None, hue_order=None, 
hue_norm=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1876, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1878, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1881, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1892, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1895, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1909, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1974, "name": "rugplot", "kind": "def", "category": "function", "info": "def rugplot(\n data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,\n palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2032, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2034, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2036, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2041, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2044, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2049, "name": "plot_rug", "kind": "ref", "category": "function", "info": " p.plot_rug(height, expand_margins, legend, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2096, "name": "displot", "kind": "def", "category": "function", "info": "def displot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, row=None, col=None, weights=None,\n # Other plot parameters\n kind=\"hist\", rug=False, rug_kws=None, log_scale=None, legend=True,\n # Hue-mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Faceting parameters\n col_wrap=None, row_order=None, col_order=None,\n height=5, aspect=1, facet_kws=None,\n **kwargs,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2110, "name": "_DistributionFacetPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionFacetPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2112, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionFacetPlotter.get_semantics(locals())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2115, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2117, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", [\"hist\", \"kde\", \"ecdf\"], kind)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2136, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2137, "name": "duplicated", "kind": "ref", "category": "function", "info": " grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2145, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2158, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(g, allowed_types=allowed_types, log_scale=log_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2178, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 2186, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " estimate_kws[\"discrete\"] = p._default_discrete()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2194, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2195, "name": "plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(**hist_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2199, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2200, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(**hist_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2208, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2219, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2220, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(**kde_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2224, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2225, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(**kde_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2234, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2243, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", 
"line": 2244, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(**ecdf_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2255, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(rug_kws, p.plot_rug, rugplot)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2259, "name": "plot_rug", "kind": "ref", "category": "function", "info": " p.plot_rug(**rug_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2263, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2264, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " x_var=p.variables.get(\"x\", g.axes.flat[0].get_xlabel()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2265, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " y_var=p.variables.get(\"y\", g.axes.flat[0].get_ylabel()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2267, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2283, "name": "rename", "kind": "ref", "category": "function", "info": " g.data = p.plot_data.rename(columns=wide_cols)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2375, "name": "_freedman_diaconis_bins", "kind": "def", "category": "function", "info": "def _freedman_diaconis_bins(a):\n \"\"\"Calculate number of hist bins using Freedman-Diaconis rule.\"\"\"\n # From https://stats.stackexchange.com/questions/798/\n a = np.asarray(a)\n if len(a) < 2:\n return 1\n iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n h = 2 * iqr / (len(a) ** (1 / 3))\n # fall back to sqrt(a) bins if iqr is 0\n if h == 0:\n return int(np.sqrt(a.size))\n else:\n return int(np.ceil((a.max() - a.min()) / h))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2381, "name": "reduce", "kind": "ref", "category": "function", "info": " iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2390, "name": "distplot", "kind": "def", "category": "function", "info": "def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,\n hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,\n color=None, vertical=False, norm_hist=False, axlabel=None,\n label=None, ax=None, x=None):\n \"\"\"\n DEPRECATED\n\n This function has been deprecated and will be removed in seaborn v0.14.0.\n It has been replaced by :func:`histplot` and 
:func:`displot`, two functions\n with a modern API and many more capabilities.\n\n For a guide to updating, please see this notebook:\n\n https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n\n \"\"\"\n\n if kde and not hist:\n axes_level_suggestion = (\n \"`kdeplot` (an axes-level function for kernel density plots)\"\n )\n else:\n axes_level_suggestion = (\n \"`histplot` (an axes-level function for histograms)\"\n )\n\n msg = textwrap.dedent(f\"\"\"\n\n `distplot` is a deprecated function and will be removed in seaborn v0.14.0.\n\n Please adapt your code to use either `displot` (a figure-level function with\n similar flexibility) or {axes_level_suggestion}.\n\n For a guide to updating your code to use the new functions, please see\n https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n \"\"\")\n warnings.warn(msg, UserWarning, stacklevel=2)\n\n if ax is None:\n ax = plt.gca()\n\n # Intelligently label the support axis\n label_ax = bool(axlabel)\n if axlabel is None and hasattr(a, \"name\"):\n axlabel = a.name\n if axlabel is not None:\n label_ax = True\n\n # Support new-style API\n if x is not None:\n a = x\n\n # Make a a 1-d float array\n a = np.asarray(a, float)\n if a.ndim > 1:\n a = a.squeeze()\n\n # Drop null values from array\n a = remove_na(a)\n\n # Decide if the hist is normed\n norm_hist = norm_hist or kde or (fit is not None)\n\n # Handle dictionary defaults\n hist_kws = {} if hist_kws is None else hist_kws.copy()\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n rug_kws = {} if rug_kws is None else rug_kws.copy()\n fit_kws = {} if fit_kws is None else fit_kws.copy()\n\n # Get the color from the current color cycle\n if color is None:\n if vertical:\n line, = ax.plot(0, a.mean())\n else:\n line, = ax.plot(a.mean(), 0)\n color = line.get_color()\n line.remove()\n\n # Plug the label into the right kwarg dictionary\n if label is not None:\n if hist:\n hist_kws[\"label\"] = label\n elif kde:\n kde_kws[\"label\"] = label\n elif rug:\n rug_kws[\"label\"] = label\n elif fit:\n fit_kws[\"label\"] = label\n\n if hist:\n if bins is None:\n bins = min(_freedman_diaconis_bins(a), 50)\n hist_kws.setdefault(\"alpha\", 0.4)\n hist_kws.setdefault(\"density\", norm_hist)\n\n orientation = \"horizontal\" if vertical else \"vertical\"\n hist_color = hist_kws.pop(\"color\", color)\n ax.hist(a, bins, orientation=orientation,\n color=hist_color, **hist_kws)\n if hist_color != color:\n hist_kws[\"color\"] = hist_color\n\n axis = \"y\" if vertical else \"x\"\n\n if kde:\n kde_color = kde_kws.pop(\"color\", color)\n kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n if kde_color != color:\n kde_kws[\"color\"] = kde_color\n\n if rug:\n rug_color = rug_kws.pop(\"color\", color)\n rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n if rug_color != color:\n rug_kws[\"color\"] = rug_color\n\n if fit is not None:\n\n def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2448, "name": "remove_na", "kind": "ref", "category": "function", "info": " a = remove_na(a)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2481, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " bins = min(_freedman_diaconis_bins(a), 50)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2496, "name": "kdeplot", "kind": "ref", "category": "function", "info": " kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2502, "name": "rugplot", "kind": "ref", "category": "function", "info": " rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2508, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2509, "name": "pdf", "kind": "ref", "category": "function", "info": " return fit.pdf(x, *params)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2515, "name": "gaussian_kde", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2515, "name": "scotts_factor", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2516, "name": "_kde_support", "kind": "ref", "category": "function", "info": " x = _kde_support(a, bw, gridsize, cut, clip)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2517, "name": "fit", "kind": "ref", "category": "function", "info": " params = fit.fit(a)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2518, "name": "pdf", "kind": "ref", "category": "function", "info": " y = pdf(x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 2527, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(axlabel)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2529, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(axlabel)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 73, "name": "user_data_dir", "kind": "def", "category": "function", "info": "def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):\n r\"\"\"Return full path to the user-specific data dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"roaming\" (boolean, default False) can be set True to use the Windows\n roaming appdata directory. That means that for users on a Windows\n network setup for roaming profiles, this user data will be\n sync'd on login. See\n \n for a discussion of issues.\n\n Typical user data directories are:\n Mac OS X: ~/Library/Application Support/\n Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined\n Win XP (not roaming): C:\\Documents and Settings\\\\Application Data\\\\\n Win XP (roaming): C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\n Win 7 (not roaming): C:\\Users\\\\AppData\\Local\\\\\n Win 7 (roaming): C:\\Users\\\\AppData\\Roaming\\\\\n\n For Unix, we follow the XDG spec and support $XDG_DATA_HOME.\n That means, by default \"~/.local/share/\".\n \"\"\"\n if system == \"win32\":\n if appauthor is None:\n appauthor = appname\n const = roaming and \"CSIDL_APPDATA\" or \"CSIDL_LOCAL_APPDATA\"\n path = os.path.normpath(_get_win_folder(const))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Application Support/')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_DATA_HOME', os.path.expanduser(\"~/.local/share\"))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 109, "name": "normpath", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(const))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 109, "name": "_get_win_folder", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(const))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 116, "name": 
"expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('~/Library/Application Support/')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 120, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_DATA_HOME', os.path.expanduser(\"~/.local/share\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 128, "name": "site_data_dir", "kind": "def", "category": "function", "info": "def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):\n r\"\"\"Return full path to the user-shared data dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"multipath\" is an optional parameter only applicable to *nix\n which indicates that the entire list of data dirs should be\n returned. By default, the first item from XDG_DATA_DIRS is\n returned, or '/usr/local/share/',\n if XDG_DATA_DIRS is not set\n\n Typical site data directories are:\n Mac OS X: /Library/Application Support/\n Unix: /usr/local/share/ or /usr/share/\n Win XP: C:\\Documents and Settings\\All Users\\Application Data\\\\\n Vista: (Fail! \"C:\\ProgramData\" is a hidden *system* directory on Vista.)\n Win 7: C:\\ProgramData\\\\ # Hidden, but writeable on Win 7.\n\n For Unix, this is using the $XDG_DATA_DIRS[0] default.\n\n WARNING: Do not use this on Windows. 
See the Vista-Fail note above for why.\n \"\"\"\n if system == \"win32\":\n if appauthor is None:\n appauthor = appname\n path = os.path.normpath(_get_win_folder(\"CSIDL_COMMON_APPDATA\"))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n elif system == 'darwin':\n path = os.path.expanduser('/Library/Application Support')\n if appname:\n path = os.path.join(path, appname)\n else:\n # XDG default for $XDG_DATA_DIRS\n # only first, if multipath is False\n path = os.getenv('XDG_DATA_DIRS',\n os.pathsep.join(['/usr/local/share', '/usr/share']))\n pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]\n if appname:\n if version:\n appname = os.path.join(appname, version)\n pathlist = [os.sep.join([x, appname]) for x in pathlist]\n\n if multipath:\n path = os.pathsep.join(pathlist)\n else:\n path = pathlist[0]\n return path\n\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 162, "name": "normpath", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_COMMON_APPDATA\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 162, "name": "_get_win_folder", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_COMMON_APPDATA\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 169, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('/Library/Application Support')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 177, "name": "expanduser", "kind": "ref", "category": "function", "info": " pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 194, "name": "user_config_dir", "kind": "def", "category": "function", "info": "def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):\n r\"\"\"Return full path to the user-specific config dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"roaming\" (boolean, default False) can be set True to use the Windows\n roaming appdata directory. That means that for users on a Windows\n network setup for roaming profiles, this user data will be\n sync'd on login. 
See\n \n for a discussion of issues.\n\n Typical user config directories are:\n Mac OS X: ~/Library/Preferences/\n Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined\n Win *: same as user_data_dir\n\n For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.\n That means, by default \"~/.config/\".\n \"\"\"\n if system == \"win32\":\n path = user_data_dir(appname, appauthor, None, roaming)\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Preferences/')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(\"~/.config\"))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 224, "name": "user_data_dir", "kind": "ref", "category": "function", "info": " path = user_data_dir(appname, appauthor, None, roaming)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 226, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('~/Library/Preferences/')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 230, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(\"~/.config\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 238, "name": "site_config_dir", "kind": "def", "category": "function", "info": "def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):\n r\"\"\"Return full path to the user-shared data dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"multipath\" is an optional parameter only applicable to *nix\n which indicates that the entire list of config dirs should be\n returned. By default, the first item from XDG_CONFIG_DIRS is\n returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set\n\n Typical site config directories are:\n Mac OS X: same as site_data_dir\n Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in\n $XDG_CONFIG_DIRS\n Win *: same as site_data_dir\n Vista: (Fail! \"C:\\ProgramData\" is a hidden *system* directory on Vista.)\n\n For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False\n\n WARNING: Do not use this on Windows. 
See the Vista-Fail note above for why.\n \"\"\"\n if system == 'win32':\n path = site_data_dir(appname, appauthor)\n if appname and version:\n path = os.path.join(path, version)\n elif system == 'darwin':\n path = os.path.expanduser('/Library/Preferences')\n if appname:\n path = os.path.join(path, appname)\n else:\n # XDG default for $XDG_CONFIG_DIRS\n # only first, if multipath is False\n path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')\n pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]\n if appname:\n if version:\n appname = os.path.join(appname, version)\n pathlist = [os.sep.join([x, appname]) for x in pathlist]\n\n if multipath:\n path = os.pathsep.join(pathlist)\n else:\n path = pathlist[0]\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 269, "name": "site_data_dir", "kind": "ref", "category": "function", "info": " path = site_data_dir(appname, appauthor)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 273, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('/Library/Preferences')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 280, "name": "expanduser", "kind": "ref", "category": "function", "info": " pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 293, "name": "user_cache_dir", "kind": "def", "category": "function", "info": "def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):\n r\"\"\"Return full path to the user-specific cache dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"opinion\" (boolean) can be False to disable the appending of\n \"Cache\" to the base app data dir for Windows. See\n discussion below.\n\n Typical user cache directories are:\n Mac OS X: ~/Library/Caches/\n Unix: ~/.cache/ (XDG default)\n Win XP: C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\\Cache\n Vista: C:\\Users\\\\AppData\\Local\\\\\\Cache\n\n On Windows the only suggestion in the MSDN docs is that local settings go in\n the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming\n app data dir (the default returned by `user_data_dir` above). Apps typically\n put cache data somewhere *under* the given dir here. 
Some examples:\n ...\\Mozilla\\Firefox\\Profiles\\\\Cache\n ...\\Acme\\SuperApp\\Cache\\1.0\n OPINION: This function appends \"Cache\" to the `CSIDL_LOCAL_APPDATA` value.\n This can be disabled with the `opinion=False` option.\n \"\"\"\n if system == \"win32\":\n if appauthor is None:\n appauthor = appname\n path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n if opinion:\n path = os.path.join(path, \"Cache\")\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Caches')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 329, "name": "normpath", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 329, "name": "_get_win_folder", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 338, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('~/Library/Caches')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 342, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 350, "name": "user_state_dir", "kind": "def", "category": "function", "info": "def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):\n r\"\"\"Return full path to the user-specific state dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"roaming\" (boolean, default False) can be set True to use the Windows\n roaming appdata directory. That means that for users on a Windows\n network setup for roaming profiles, this user data will be\n sync'd on login. 
See\n \n for a discussion of issues.\n\n Typical user state directories are:\n Mac OS X: same as user_data_dir\n Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined\n Win *: same as user_data_dir\n\n For Unix, we follow this Debian proposal \n to extend the XDG spec and support $XDG_STATE_HOME.\n\n That means, by default \"~/.local/state/\".\n \"\"\"\n if system in [\"win32\", \"darwin\"]:\n path = user_data_dir(appname, appauthor, None, roaming)\n else:\n path = os.getenv('XDG_STATE_HOME', os.path.expanduser(\"~/.local/state\"))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 382, "name": "user_data_dir", "kind": "ref", "category": "function", "info": " path = user_data_dir(appname, appauthor, None, roaming)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 384, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_STATE_HOME', os.path.expanduser(\"~/.local/state\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 392, "name": "user_log_dir", "kind": "def", "category": "function", "info": "def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):\n r\"\"\"Return full path to the user-specific log dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"opinion\" (boolean) can be False to disable the appending of\n \"Logs\" to the base app data dir for Windows, and \"log\" to the\n base cache dir for Unix. See discussion below.\n\n Typical user log directories are:\n Mac OS X: ~/Library/Logs/\n Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined\n Win XP: C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\\Logs\n Vista: C:\\Users\\\\AppData\\Local\\\\\\Logs\n\n On Windows the only suggestion in the MSDN docs is that local settings\n go in the `CSIDL_LOCAL_APPDATA` directory. 
(Note: I'm interested in\n examples of what some windows apps use for a logs dir.)\n\n OPINION: This function appends \"Logs\" to the `CSIDL_LOCAL_APPDATA`\n value for Windows and appends \"log\" to the user cache dir for Unix.\n This can be disabled with the `opinion=False` option.\n \"\"\"\n if system == \"darwin\":\n path = os.path.join(\n os.path.expanduser('~/Library/Logs'),\n appname)\n elif system == \"win32\":\n path = user_data_dir(appname, appauthor, version)\n version = False\n if opinion:\n path = os.path.join(path, \"Logs\")\n else:\n path = user_cache_dir(appname, appauthor, version)\n version = False\n if opinion:\n path = os.path.join(path, \"log\")\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 426, "name": "expanduser", "kind": "ref", "category": "function", "info": " os.path.expanduser('~/Library/Logs'),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 429, "name": "user_data_dir", "kind": "ref", "category": "function", "info": " path = user_data_dir(appname, appauthor, version)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 434, "name": "user_cache_dir", "kind": "ref", "category": "function", "info": " path = user_cache_dir(appname, appauthor, version)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 443, "name": "AppDirs", "kind": "def", "category": "class", "info": "__init__\tuser_data_dir\tsite_data_dir\tuser_config_dir\tsite_config_dir\tuser_cache_dir\tuser_state_dir\tuser_log_dir"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 454, "name": "user_data_dir", "kind": "def", "category": "function", "info": " def user_data_dir(self):\n return user_data_dir(self.appname, self.appauthor,\n version=self.version, roaming=self.roaming)\n\n @property\n def site_data_dir(self):\n return site_data_dir(self.appname, self.appauthor,\n version=self.version, multipath=self.multipath)\n\n @property\n def user_config_dir(self):\n return user_config_dir(self.appname, self.appauthor,\n version=self.version, roaming=self.roaming)\n\n @property\n def site_config_dir(self):\n return site_config_dir(self.appname, self.appauthor,\n version=self.version, multipath=self.multipath)\n\n @property\n def user_cache_dir(self):\n return user_cache_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_state_dir(self):\n return user_state_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 455, "name": "user_data_dir", "kind": "ref", "category": "function", "info": " return user_data_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 459, "name": "site_data_dir", 
"kind": "def", "category": "function", "info": " def site_data_dir(self):\n return site_data_dir(self.appname, self.appauthor,\n version=self.version, multipath=self.multipath)\n\n @property\n def user_config_dir(self):\n return user_config_dir(self.appname, self.appauthor,\n version=self.version, roaming=self.roaming)\n\n @property\n def site_config_dir(self):\n return site_config_dir(self.appname, self.appauthor,\n version=self.version, multipath=self.multipath)\n\n @property\n def user_cache_dir(self):\n return user_cache_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_state_dir(self):\n return user_state_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 460, "name": "site_data_dir", "kind": "ref", "category": "function", "info": " return site_data_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 464, "name": "user_config_dir", "kind": "def", "category": "function", "info": " def user_config_dir(self):\n return user_config_dir(self.appname, self.appauthor,\n version=self.version, roaming=self.roaming)\n\n @property\n def site_config_dir(self):\n return site_config_dir(self.appname, self.appauthor,\n version=self.version, multipath=self.multipath)\n\n @property\n def user_cache_dir(self):\n return user_cache_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_state_dir(self):\n return user_state_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 465, "name": "user_config_dir", "kind": "ref", "category": "function", "info": " return user_config_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 469, "name": "site_config_dir", "kind": "def", "category": "function", "info": " def site_config_dir(self):\n return site_config_dir(self.appname, self.appauthor,\n version=self.version, multipath=self.multipath)\n\n @property\n def user_cache_dir(self):\n return user_cache_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_state_dir(self):\n return user_state_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 470, "name": "site_config_dir", "kind": "ref", "category": "function", "info": " return site_config_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 474, "name": "user_cache_dir", "kind": "def", "category": "function", "info": " def user_cache_dir(self):\n 
return user_cache_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_state_dir(self):\n return user_state_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 475, "name": "user_cache_dir", "kind": "ref", "category": "function", "info": " return user_cache_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 479, "name": "user_state_dir", "kind": "def", "category": "function", "info": " def user_state_dir(self):\n return user_state_dir(self.appname, self.appauthor,\n version=self.version)\n\n @property\n def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 480, "name": "user_state_dir", "kind": "ref", "category": "function", "info": " return user_state_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 484, "name": "user_log_dir", "kind": "def", "category": "function", "info": " def user_log_dir(self):\n return user_log_dir(self.appname, self.appauthor,\n version=self.version)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 485, "name": "user_log_dir", "kind": "ref", "category": "function", "info": " return user_log_dir(self.appname, self.appauthor,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 491, "name": "_get_win_folder_from_registry", "kind": "def", "category": "function", "info": "def _get_win_folder_from_registry(csidl_name):\n \"\"\"This is a fallback technique at best. 
I'm not sure if using the\n registry for this guarantees us the correct answer for all CSIDL_*\n names.\n \"\"\"\n import winreg as _winreg\n\n shell_folder_name = {\n \"CSIDL_APPDATA\": \"AppData\",\n \"CSIDL_COMMON_APPDATA\": \"Common AppData\",\n \"CSIDL_LOCAL_APPDATA\": \"Local AppData\",\n }[csidl_name]\n\n key = _winreg.OpenKey(\n _winreg.HKEY_CURRENT_USER,\n r\"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\"\n )\n dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n return dir\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 504, "name": "OpenKey", "kind": "ref", "category": "function", "info": " key = _winreg.OpenKey(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 508, "name": "QueryValueEx", "kind": "ref", "category": "function", "info": " dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 512, "name": "_get_win_folder_with_pywin32", "kind": "def", "category": "function", "info": "def _get_win_folder_with_pywin32(csidl_name):\n from win32com.shell import shellcon, shell\n dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n # Try to make this a unicode path because SHGetFolderPath does\n # not return unicode strings when there is unicode data in the\n # path.\n try:\n dir = unicode(dir)\n\n # Downgrade to short path name if have highbit chars. See\n # .\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n try:\n import win32api\n dir = win32api.GetShortPathName(dir)\n except ImportError:\n pass\n except UnicodeError:\n pass\n return dir\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 514, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 519, "name": "unicode", "kind": "ref", "category": "function", "info": " dir = unicode(dir)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 531, "name": "GetShortPathName", "kind": "ref", "category": "function", "info": " dir = win32api.GetShortPathName(dir)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 539, "name": "_get_win_folder_with_ctypes", "kind": "def", "category": "function", "info": "def _get_win_folder_with_ctypes(csidl_name):\n import ctypes\n\n csidl_const = {\n \"CSIDL_APPDATA\": 26,\n \"CSIDL_COMMON_APPDATA\": 35,\n \"CSIDL_LOCAL_APPDATA\": 28,\n }[csidl_name]\n\n buf = ctypes.create_unicode_buffer(1024)\n ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n\n # Downgrade to short path name if have highbit chars. 
See\n # .\n has_high_char = False\n for c in buf:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf2 = ctypes.create_unicode_buffer(1024)\n if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n buf = buf2\n\n return buf.value\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 549, "name": "SHGetFolderPathW", "kind": "ref", "category": "function", "info": " ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 560, "name": "GetShortPathNameW", "kind": "ref", "category": "function", "info": " if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 565, "name": "_get_win_folder_with_jna", "kind": "def", "category": "function", "info": "def _get_win_folder_with_jna(csidl_name):\n import array\n from com.sun import jna\n from com.sun.jna.platform import win32\n\n buf_size = win32.WinDef.MAX_PATH * 2\n buf = array.zeros('c', buf_size)\n shell = win32.Shell32.INSTANCE\n shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n # Downgrade to short path name if have highbit chars. See\n # .\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf = array.zeros('c', buf_size)\n kernel = win32.Kernel32.INSTANCE\n if kernel.GetShortPathName(dir, buf, buf_size):\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n return dir\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 571, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 573, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 574, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 574, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 584, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 586, "name": "GetShortPathName", "kind": "ref", "category": 
"function", "info": " if kernel.GetShortPathName(dir, buf, buf_size):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 587, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 587, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 624, "name": "AppDirs", "kind": "ref", "category": "function", "info": " dirs = AppDirs(appname, appauthor, version=\"1.0\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 629, "name": "AppDirs", "kind": "ref", "category": "function", "info": " dirs = AppDirs(appname, appauthor)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 634, "name": "AppDirs", "kind": "ref", "category": "function", "info": " dirs = AppDirs(appname)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 639, "name": "AppDirs", "kind": "ref", "category": "function", "info": " dirs = AppDirs(appname, appauthor=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 39, "name": "strip_blank_lines", "kind": "def", "category": "function", "info": "def strip_blank_lines(l):\n \"Remove leading and trailing blank lines from a list of lines\"\n while l and not l[0].strip():\n del l[0]\n while l and not l[-1].strip():\n del l[-1]\n return l\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 48, "name": "Reader", "kind": "def", "category": "class", "info": "__init__\t__getitem__\treset\tread\tseek_next_non_empty_line\teof\tread_to_condition\tread_to_next_empty_line\tread_to_next_unindented_line\tpeek\tis_empty"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 65, "name": "reset", "kind": "ref", "category": "function", "info": " self.reset()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 70, "name": "reset", "kind": "def", "category": "function", "info": " def reset(self):\n self._l = 0 # current line nr\n\n def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def 
read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 73, "name": "read", "kind": "def", "category": "function", "info": " def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 74, "name": "eof", "kind": "ref", "category": "function", "info": " if not self.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 81, "name": "seek_next_non_empty_line", "kind": "def", "category": "function", "info": " def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 88, "name": "eof", "kind": "def", "category": "function", "info": " def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if 
condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 91, "name": "read_to_condition", "kind": "def", "category": "function", "info": " def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 94, "name": "condition_func", "kind": "ref", "category": "function", "info": " if condition_func(line):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 97, "name": "eof", "kind": "ref", "category": "function", "info": " if self.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 101, "name": "read_to_next_empty_line", "kind": "def", "category": "function", "info": " def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 102, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self.seek_next_non_empty_line()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 104, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def 
is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 107, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_empty)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 109, "name": "read_to_next_unindented_line", "kind": "def", "category": "function", "info": " def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 110, "name": "is_unindented", "kind": "def", "category": "function", "info": " def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 112, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_unindented)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 114, "name": "peek", "kind": "def", "category": "function", "info": " def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 120, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 124, "name": "ParseError", "kind": "def", "category": "class", "info": "__str__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 135, "name": "NumpyDocString", "kind": "def", "category": "class", "info": "__init__\t__getitem__\t__setitem__\t__iter__\t__len__\t_is_at_section\t_strip\t_read_to_next_section\t_read_sections\t_parse_param_list\t_parse_see_also\t_parse_index\t_parse_summary\t_parse\t_error_location\t_str_header\t_str_indent\t_str_signature\t_str_summary\t_str_extended_summary\t_str_param_list\t_str_section\t_str_see_also\t_str_index\t__str__"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 167, "name": "Reader", "kind": "ref", "category": "function", "info": " self._doc = Reader(docstring)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 171, "name": "_parse", "kind": "ref", "category": "function", "info": " self._parse()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 181, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"Unknown section {key}\", error=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 191, "name": "_is_at_section", "kind": "def", "category": "function", "info": " def _is_at_section(self):\n self._doc.seek_next_non_empty_line()\n\n if self._doc.eof():\n return False\n\n l1 = self._doc.peek().strip() # e.g. Parameters\n\n if l1.startswith('.. index::'):\n return True\n\n l2 = self._doc.peek(1).strip() # ---------- or ==========\n return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))\n\n def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # \n # SPACE* COLON SPACE+ SPACE*\n # ( COMMA SPACE+ )+ (COMMA | PERIOD)? 
SPACE*\n # ( COMMA SPACE+ )* SPACE* COLON SPACE+ SPACE*\n\n # is one of\n # \n # COLON COLON BACKTICK BACKTICK\n # where\n # is a legal function name, and\n # is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # is a string describing the function.\n\n _role = r\":(?P\\w+):\"\n _funcbacktick = r\"`(?P(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P\\s*:(\\s+(?P\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P\" + # group for all function names\n _funcname +\n r\"(?P([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 192, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self._doc.seek_next_non_empty_line()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 194, "name": "eof", "kind": "ref", "category": "function", "info": " if self._doc.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 197, "name": "peek", "kind": "ref", "category": "function", "info": " l1 = self._doc.peek().strip() # e.g. Parameters\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 202, "name": "peek", "kind": "ref", "category": "function", "info": " l2 = self._doc.peek(1).strip() # ---------- or ==========\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 205, "name": "_strip", "kind": "def", "category": "function", "info": " def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ 
(COMMA | PERIOD)? SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 218, "name": "_read_to_next_section", "kind": "def", "category": "function", "info": " def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? 
SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 219, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " section = self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 221, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " while not self._is_at_section() and not self._doc.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 221, "name": "eof", "kind": "ref", "category": "function", "info": " while not self._is_at_section() and not self._doc.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 222, "name": "peek", "kind": "ref", "category": "function", "info": " if not self._doc.peek(-1).strip(): # previous line was empty\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 225, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " section += self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 229, "name": "_read_sections", "kind": "def", "category": "function", "info": " def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? 
SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 230, "name": "eof", "kind": "ref", "category": "function", "info": " while not self._doc.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 231, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " data = self._read_to_next_section()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 239, "name": "_strip", "kind": "ref", "category": "function", "info": " yield name, self._strip(data[2:])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 241, "name": "_parse_param_list", "kind": "def", "category": "function", "info": " def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? 
SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 242, "name": "Reader", "kind": "ref", "category": "function", "info": " r = Reader(content)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 244, "name": "eof", "kind": "ref", "category": "function", "info": " while not r.eof():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 245, "name": "read", "kind": "ref", "category": "function", "info": " header = r.read().strip()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 254, "name": "read_to_next_unindented_line", "kind": "ref", "category": "function", "info": " desc = r.read_to_next_unindented_line()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 255, "name": "dedent_lines", "kind": "ref", "category": "function", "info": " desc = dedent_lines(desc)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 256, "name": "strip_blank_lines", "kind": "ref", "category": "function", "info": " desc = strip_blank_lines(desc)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 298, "name": "_parse_see_also", "kind": "def", "category": "function", "info": " def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if 
not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 309, "name": "parse_item_name", "kind": "def", "category": "function", "info": " def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 313, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{text} is not a item name\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 314, "name": "group", "kind": "ref", "category": "function", "info": " role = m.group('role')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 315, "name": "group", "kind": "ref", "category": "function", "info": " name = m.group('name') if role else m.group('name2')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 315, "name": "group", "kind": "ref", "category": "function", "info": " name = m.group('name') if role else m.group('name2')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 316, "name": "end", "kind": "ref", "category": "function", "info": " return name, role, m.end()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 326, "name": "group", "kind": "ref", "category": "function", "info": " description = line_match.group('desc')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 327, "name": "group", "kind": "ref", "category": "function", "info": " if line_match.group('trailing') and description:\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 328, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 330, "name": "end", "kind": "ref", "category": "function", "info": " 'line \"%s\"' % (line_match.end('trailing'), line),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 336, "name": "group", "kind": "ref", "category": "function", "info": " text = line_match.group('allfuncs')\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 340, "name": "parse_item_name", "kind": "ref", "category": "function", "info": " name, role, match_end = parse_item_name(text)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 348, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{line} is not a item name\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 351, "name": "_parse_index", "kind": "def", "category": "function", "info": " def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 357, "name": "strip_each_in", "kind": "def", "category": "function", "info": " def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 363, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out['default'] = strip_each_in(section[1].split(','))[0]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 367, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out[line[1]] = strip_each_in(line[2].split(','))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 370, "name": "_parse_summary", "kind": "def", "category": "function", "info": " def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 372, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if self._is_at_section():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 377, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " summary = self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 382, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 389, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 390, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " self['Extended Summary'] = self._read_to_next_section()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 392, "name": "_parse", "kind": "def", "category": "function", "info": " def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. 
Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 393, "name": "reset", "kind": "ref", "category": "function", "info": " self._doc.reset()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 394, "name": "_parse_summary", "kind": "ref", "category": "function", "info": " self._parse_summary()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 396, "name": "_read_sections", "kind": "ref", "category": "function", "info": " sections = list(self._read_sections())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 414, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"The section {section} appears twice\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 418, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(content)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 420, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 423, "name": "_parse_index", "kind": "ref", "category": "function", "info": " self['index'] = self._parse_index(section, content)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 425, "name": "_parse_see_also", "kind": "ref", "category": "function", "info": " self['See Also'] = self._parse_see_also(content)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 429, "name": "_error_location", "kind": "def", "category": "function", "info": " def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} 
in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 444, "name": "_str_header", "kind": "def", "category": "function", "info": " def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 447, "name": "_str_indent", "kind": "def", "category": "function", "info": " def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 453, "name": "_str_signature", "kind": "def", "category": "function", "info": " def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 459, "name": "_str_summary", "kind": "def", "category": "function", "info": " def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 465, "name": "_str_extended_summary", "kind": "def", "category": "function", "info": " def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 471, "name": "_str_param_list", "kind": "def", "category": "function", "info": " def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 474, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 483, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent(param.desc)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 487, "name": "_str_section", "kind": "def", "category": "function", "info": " def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 490, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 495, "name": "_str_see_also", "kind": "def", "category": "function", "info": " def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 499, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(\"See Also\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 516, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([' '.join(desc)])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 520, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([self.empty_description])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 527, "name": "_str_index", "kind": "def", "category": "function", "info": " def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 547, "name": "_str_signature", "kind": "ref", "category": "function", "info": " out += self._str_signature()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 548, "name": "_str_summary", "kind": "ref", "category": "function", "info": " out += self._str_summary()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 549, "name": "_str_extended_summary", "kind": "ref", "category": "function", "info": " out += self._str_extended_summary()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 552, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 553, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section('Warnings')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 554, "name": "_str_see_also", "kind": "ref", "category": "function", "info": " out += self._str_see_also(func_role)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 556, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section(s)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 558, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 559, "name": "_str_index", "kind": "ref", "category": "function", "info": " out += self._str_index()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 571, "name": "dedent_lines", "kind": "def", "category": "function", "info": "def dedent_lines(lines):\n \"\"\"Deindent a list 
of lines maximally\"\"\"\n return textwrap.dedent(\"\\n\".join(lines)).split(\"\\n\")\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 576, "name": "header", "kind": "def", "category": "function", "info": "def header(text, style='-'):\n return text + '\\n' + style*len(text) + '\\n'\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 580, "name": "FunctionDoc", "kind": "def", "category": "class", "info": "__init__\tget_func\t__str__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 592, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 608, "name": "get_func", "kind": "def", "category": "function", "info": " def get_func(self):\n func_name = getattr(self._f, '__name__', self.__class__.__name__)\n if inspect.isclass(self._f):\n func = getattr(self._f, '__call__', self._f.__init__)\n else:\n func = self._f\n return func, func_name\n\n def __str__(self):\n out = ''\n\n func, func_name = self.get_func()\n\n roles = {'func': 'function',\n 'meth': 'method'}\n\n if self._role:\n if self._role not in roles:\n print(f\"Warning: invalid role {self._role}\")\n out += f\".. {roles.get(self._role, '')}:: {func_name}\\n \\n\\n\"\n\n out += super().__str__(func_role=self._role)\n return out\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 619, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 633, "name": "ClassDoc", "kind": "def", "category": "class", "info": "__init__\tmethods\tproperties\t_is_show_member"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 668, "name": "splitlines_x", "kind": "def", "category": "function", "info": " def splitlines_x(s):\n if not s:\n return []\n else:\n return s.splitlines()\n for field, items in [('Methods', self.methods),\n ('Attributes', self.properties)]:\n if not self[field]:\n doc_list = []\n for name in sorted(items):\n if (name in _exclude or\n (_members and name not in _members)):\n continue\n try:\n doc_item = pydoc.getdoc(getattr(self._cls, name))\n doc_list.append(\n Parameter(name, '', splitlines_x(doc_item)))\n except AttributeError:\n pass # method doesn't exist\n self[field] = doc_list\n\n @property\n def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n 
inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 684, "name": "splitlines_x", "kind": "ref", "category": "function", "info": " Parameter(name, '', splitlines_x(doc_item)))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 690, "name": "methods", "kind": "def", "category": "function", "info": " def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 697, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 700, "name": "properties", "kind": "def", "category": "function", "info": " def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 707, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 709, "name": "_is_show_member", "kind": "def", "category": "function", "info": " def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 30, "name": "husl_to_rgb", "kind": "def", "category": "function", "info": "def husl_to_rgb(h, s, l):\n 
return lch_to_rgb(*husl_to_lch([h, s, l]))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "lch_to_rgb", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "husl_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 34, "name": "husl_to_hex", "kind": "def", "category": "function", "info": "def husl_to_hex(h, s, l):\n return rgb_to_hex(husl_to_rgb(h, s, l))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 38, "name": "rgb_to_husl", "kind": "def", "category": "function", "info": "def rgb_to_husl(r, g, b):\n return lch_to_husl(rgb_to_lch(r, g, b))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "lch_to_husl", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 42, "name": "hex_to_husl", "kind": "def", "category": "function", "info": "def hex_to_husl(hex):\n return rgb_to_husl(*hex_to_rgb(hex))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 46, "name": "huslp_to_rgb", "kind": "def", "category": "function", "info": "def huslp_to_rgb(h, s, l):\n return lch_to_rgb(*huslp_to_lch([h, s, l]))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "lch_to_rgb", "kind": "ref", "category": "function", "info": 
" return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "huslp_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 50, "name": "huslp_to_hex", "kind": "def", "category": "function", "info": "def huslp_to_hex(h, s, l):\n return rgb_to_hex(huslp_to_rgb(h, s, l))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "huslp_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 54, "name": "rgb_to_huslp", "kind": "def", "category": "function", "info": "def rgb_to_huslp(r, g, b):\n return lch_to_huslp(rgb_to_lch(r, g, b))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "lch_to_huslp", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 58, "name": "hex_to_huslp", "kind": "def", "category": "function", "info": "def hex_to_huslp(hex):\n return rgb_to_huslp(*hex_to_rgb(hex))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "rgb_to_huslp", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 62, "name": "lch_to_rgb", "kind": "def", "category": "function", "info": "def lch_to_rgb(l, c, h):\n return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "xyz_to_rgb", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "luv_to_xyz", "kind": 
"ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "lch_to_luv", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 66, "name": "rgb_to_lch", "kind": "def", "category": "function", "info": "def rgb_to_lch(r, g, b):\n return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "luv_to_lch", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "xyz_to_luv", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "rgb_to_xyz", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 70, "name": "max_chroma", "kind": "def", "category": "function", "info": "def max_chroma(L, H):\n hrad = math.radians(H)\n sinH = (math.sin(hrad))\n cosH = (math.cos(hrad))\n sub1 = (math.pow(L + 16, 3.0) / 1560896.0)\n sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)\n result = float(\"inf\")\n for row in m:\n m1 = row[0]\n m2 = row[1]\n m3 = row[2]\n top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)\n rbottom = (0.86330 * m3 - 0.17266 * m2)\n lbottom = (0.12949 * m3 - 0.38848 * m1)\n bottom = (rbottom * sinH + lbottom * cosH) * sub2\n\n for t in (0.0, 1.0):\n C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))\n if C > 0.0 and C < result:\n result = C\n return result\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 93, "name": "_hrad_extremum", "kind": "def", "category": "function", "info": "def _hrad_extremum(L):\n lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0\n rhs = 1107.0 / 125000.0\n sub = lhs if lhs > rhs else 10.0 * L / 9033.0\n chroma = float(\"inf\")\n result = None\n for row in m:\n for limit in (0.0, 1.0):\n [m1, m2, m3] = row\n top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit\n bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub\n hrad = math.atan2(top, bottom)\n # This is a math hack to deal with tan quadrants, I'm too lazy to figure\n # out how to do this properly\n if limit == 0.0:\n hrad += math.pi\n test = max_chroma(L, math.degrees(hrad))\n if test < chroma:\n chroma = test\n result = hrad\n return result\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 109, "name": "max_chroma", "kind": "ref", "category": "function", "info": " test = max_chroma(L, 
math.degrees(hrad))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 116, "name": "max_chroma_pastel", "kind": "def", "category": "function", "info": "def max_chroma_pastel(L):\n H = math.degrees(_hrad_extremum(L))\n return max_chroma(L, H)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 117, "name": "_hrad_extremum", "kind": "ref", "category": "function", "info": " H = math.degrees(_hrad_extremum(L))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 118, "name": "max_chroma", "kind": "ref", "category": "function", "info": " return max_chroma(L, H)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 121, "name": "dot_product", "kind": "def", "category": "function", "info": "def dot_product(a, b):\n return sum(map(operator.mul, a, b))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 125, "name": "f", "kind": "def", "category": "function", "info": "def f(t):\n if t > lab_e:\n return (math.pow(t, 1.0 / 3.0))\n else:\n return (7.787 * t + 16.0 / 116.0)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 132, "name": "f_inv", "kind": "def", "category": "function", "info": "def f_inv(t):\n if math.pow(t, 3.0) > lab_e:\n return (math.pow(t, 3.0))\n else:\n return (116.0 * t - 16.0) / lab_k\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 139, "name": "from_linear", "kind": "def", "category": "function", "info": "def from_linear(c):\n if c <= 0.0031308:\n return 12.92 * c\n else:\n return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 146, "name": "to_linear", "kind": "def", "category": "function", "info": "def to_linear(c):\n a = 0.055\n\n if c > 0.04045:\n return (math.pow((c + a) / (1.0 + a), 2.4))\n else:\n return (c / 12.92)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 155, "name": "rgb_prepare", "kind": "def", "category": "function", "info": "def rgb_prepare(triple):\n ret = []\n for ch in triple:\n ch = round(ch, 3)\n\n if ch < -0.0001 or ch > 1.0001:\n raise Exception(f\"Illegal RGB value {ch:f}\")\n\n if ch < 0:\n ch = 0\n if ch > 1:\n ch = 1\n\n # Fix for Python 3 which by default rounds 4.5 down to 4.0\n # instead of Python 2 which is rounded to 5.0 which caused\n # a couple off by one errors in the tests. 
Tests now all pass\n # in Python 2 and Python 3\n ret.append(int(round(ch * 255 + 0.001, 0)))\n\n return ret\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 177, "name": "hex_to_rgb", "kind": "def", "category": "function", "info": "def hex_to_rgb(hex):\n if hex.startswith('#'):\n hex = hex[1:]\n r = int(hex[0:2], 16) / 255.0\n g = int(hex[2:4], 16) / 255.0\n b = int(hex[4:6], 16) / 255.0\n return [r, g, b]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 186, "name": "rgb_to_hex", "kind": "def", "category": "function", "info": "def rgb_to_hex(triple):\n [r, g, b] = triple\n return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 188, "name": "rgb_prepare", "kind": "ref", "category": "function", "info": " return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 191, "name": "xyz_to_rgb", "kind": "def", "category": "function", "info": "def xyz_to_rgb(triple):\n xyz = map(lambda row: dot_product(row, triple), m)\n return list(map(from_linear, xyz))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 192, "name": "dot_product", "kind": "ref", "category": "function", "info": " xyz = map(lambda row: dot_product(row, triple), m)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 196, "name": "rgb_to_xyz", "kind": "def", "category": "function", "info": "def rgb_to_xyz(triple):\n rgbl = list(map(to_linear, triple))\n return list(map(lambda row: dot_product(row, rgbl), m_inv))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 198, "name": "dot_product", "kind": "ref", "category": "function", "info": " return list(map(lambda row: dot_product(row, rgbl), m_inv))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 201, "name": "xyz_to_luv", "kind": "def", "category": "function", "info": "def xyz_to_luv(triple):\n X, Y, Z = triple\n\n if X == Y == Z == 0.0:\n return [0.0, 0.0, 0.0]\n\n varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))\n varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))\n L = 116.0 * f(Y / refY) - 16.0\n\n # Black will create a divide-by-zero error\n if L == 0.0:\n return [0.0, 0.0, 0.0]\n\n U = 13.0 * L * (varU - refU)\n V = 13.0 * L * (varV - refV)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 209, "name": "f", "kind": "ref", "category": "function", "info": " L = 116.0 * f(Y / refY) - 16.0\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 221, "name": "luv_to_xyz", "kind": "def", "category": "function", "info": "def luv_to_xyz(triple):\n L, U, V = triple\n\n if L == 0:\n return [0.0, 
0.0, 0.0]\n\n varY = f_inv((L + 16.0) / 116.0)\n varU = U / (13.0 * L) + refU\n varV = V / (13.0 * L) + refV\n Y = varY * refY\n X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)\n Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)\n\n return [X, Y, Z]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 227, "name": "f_inv", "kind": "ref", "category": "function", "info": " varY = f_inv((L + 16.0) / 116.0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 237, "name": "luv_to_lch", "kind": "def", "category": "function", "info": "def luv_to_lch(triple):\n L, U, V = triple\n\n C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))\n hrad = (math.atan2(V, U))\n H = math.degrees(hrad)\n if H < 0.0:\n H = 360.0 + H\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 249, "name": "lch_to_luv", "kind": "def", "category": "function", "info": "def lch_to_luv(triple):\n L, C, H = triple\n\n Hrad = math.radians(H)\n U = (math.cos(Hrad) * C)\n V = (math.sin(Hrad) * C)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 259, "name": "husl_to_lch", "kind": "def", "category": "function", "info": "def husl_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma(L, H)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 267, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 273, "name": "lch_to_husl", "kind": "def", "category": "function", "info": "def lch_to_husl(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma(L, H)\n S = C / mx * 100.0\n\n return [H, S, L]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 281, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 287, "name": "huslp_to_lch", "kind": "def", "category": "function", "info": "def huslp_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma_pastel(L)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 295, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 301, "name": 
"lch_to_huslp", "kind": "def", "category": "function", "info": "def lch_to_huslp(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma_pastel(L)\n S = C / mx * 100.0\n\n return [H, S, L]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 309, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 81, "name": "gaussian_kde", "kind": "def", "category": "class", "info": "__init__\tevaluate\tscotts_factor\tsilverman_factor\tset_bandwidth\t_compute_covariance\tpdf\tweights\tneff"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 202, "name": "astype", "kind": "ref", "category": "function", "info": " self._weights = atleast_1d(weights).astype(float)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 210, "name": "set_bandwidth", "kind": "ref", "category": "function", "info": " self.set_bandwidth(bw_method=bw_method)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 212, "name": "evaluate", "kind": "def", "category": "function", "info": " def evaluate(self, points):\n \"\"\"Evaluate the estimated pdf on a set of points.\n\n Parameters\n ----------\n points : (# of dimensions, # of points)-array\n Alternatively, a (# of dimensions,) vector can be passed in and\n treated as a single point.\n\n Returns\n -------\n values : (# of points,)-array\n The values at each point.\n\n Raises\n ------\n ValueError : if the dimensionality of the input points is different than\n the dimensionality of the KDE.\n\n \"\"\"\n points = atleast_2d(asarray(points))\n\n d, m = points.shape\n if d != self.d:\n if d == 1 and m == self.d:\n # points was passed in as a row vector\n points = reshape(points, (self.d, 1))\n m = 1\n else:\n msg = f\"points have dimension {d}, dataset has dimension {self.d}\"\n raise ValueError(msg)\n\n output_dtype = np.common_type(self.covariance, points)\n result = zeros((m,), dtype=output_dtype)\n\n whitening = linalg.cholesky(self.inv_cov)\n scaled_dataset = dot(whitening, self.dataset)\n scaled_points = dot(whitening, points)\n\n if m >= self.n:\n # there are more points than data, so loop over data\n for i in range(self.n):\n diff = scaled_dataset[:, i, newaxis] - scaled_points\n energy = sum(diff * diff, axis=0) / 2.0\n result += self.weights[i]*exp(-energy)\n else:\n # loop over points\n for i in range(m):\n diff = scaled_dataset - scaled_points[:, i, newaxis]\n energy = sum(diff * diff, axis=0) / 2.0\n result[i] = sum(exp(-energy)*self.weights, axis=0)\n\n result = result / self._norm_factor\n\n return result\n\n __call__ = evaluate\n\n def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be 
overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 270, "name": "scotts_factor", "kind": "def", "category": "function", "info": " def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 280, "name": "silverman_factor", "kind": "def", "category": "function", "info": " def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 298, "name": "set_bandwidth", "kind": "def", "category": "function", "info": " def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 330, "name": "_bw_method", "kind": "ref", "category": "function", "info": " self.covariance_factor = lambda: self._bw_method(self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 336, "name": "_compute_covariance", "kind": "ref", "category": "function", "info": " self._compute_covariance()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 338, "name": "_compute_covariance", "kind": "def", "category": "function", "info": " def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 342, "name": "covariance_factor", "kind": "ref", "category": "function", "info": " self.factor = self.covariance_factor()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 354, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 364, "name": "evaluate", "kind": "ref", "category": "function", "info": " return self.evaluate(x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 367, "name": "weights", "kind": "def", "category": "function", "info": " def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 375, "name": "neff", "kind": "def", "category": "function", "info": " def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 33, "name": "InfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 58, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> \"NegativeInfinityType\":\n return NegativeInfinity\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 62, "name": "InfinityType", "kind": "ref", "category": "function", "info": "Infinity = InfinityType()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 65, "name": "NegativeInfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 90, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> InfinityType:\n return Infinity\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 94, "name": "NegativeInfinityType", "kind": "ref", "category": "function", "info": "NegativeInfinity = NegativeInfinityType()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 127, "name": "InvalidVersion", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": 
"seaborn/external/version.py", "line": 133, "name": "_BaseVersion", "kind": "def", "category": "class", "info": "__hash__\t__lt__\t__le__\t__eq__\t__ge__\t__gt__\t__ne__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 213, "name": "Version", "kind": "def", "category": "class", "info": "__init__\t__repr__\t__str__\tepoch\trelease\tpre\tpost\tdev\tlocal\tpublic\tbase_version\tis_prerelease\tis_postrelease\tis_devrelease\tmajor\tminor\tmicro"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 222, "name": "InvalidVersion", "kind": "ref", "category": "function", "info": " raise InvalidVersion(f\"Invalid version: '{version}'\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 225, "name": "_Version", "kind": "ref", "category": "function", "info": " self._version = _Version(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 227, "name": "group", "kind": "ref", "category": "function", "info": " release=tuple(int(i) for i in match.group(\"release\").split(\".\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 229, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " post=_parse_letter_version(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "_parse_local_version", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "group", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 237, "name": "_cmpkey", "kind": "ref", "category": "function", "info": " self._key = _cmpkey(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 278, "name": "epoch", "kind": "def", "category": "function", "info": " def epoch(self) -> int:\n _epoch: int = self._version.epoch\n return _epoch\n\n @property\n def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] 
= self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 283, "name": "release", "kind": "def", "category": "function", "info": " def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] = self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 288, "name": "pre", "kind": "def", "category": "function", "info": " def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if 
self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 293, "name": "post", "kind": "def", "category": "function", "info": " def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 297, "name": "dev", "kind": "def", "category": "function", "info": " def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def 
is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 301, "name": "local", "kind": "def", "category": "function", "info": " def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 308, "name": "public", "kind": "def", "category": "function", "info": " def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 312, "name": "base_version", "kind": "def", "category": "function", "info": " def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if 
len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 325, "name": "is_prerelease", "kind": "def", "category": "function", "info": " def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 329, "name": "is_postrelease", "kind": "def", "category": "function", "info": " def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 333, "name": "is_devrelease", "kind": "def", "category": "function", "info": " def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 337, "name": "major", "kind": "def", "category": "function", "info": " def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 341, "name": "minor", "kind": "def", "category": "function", "info": " def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 345, "name": "micro", "kind": "def", "category": "function", "info": " def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 349, "name": "_parse_letter_version", "kind": "def", "category": "function", "info": "def _parse_letter_version(\n letter: str, number: Union[str, bytes, SupportsInt]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 388, "name": "_parse_local_version", "kind": "def", "category": "function", "info": "def _parse_local_version(local: str) -> Optional[LocalType]:\n \"\"\"\n Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").\n \"\"\"\n if local is not None:\n return tuple(\n part.lower() if not part.isdigit() else int(part)\n for part in _local_version_separators.split(local)\n )\n return None\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 400, "name": "_cmpkey", "kind": "def", "category": "function", "info": "def _cmpkey(\n epoch: int,\n release: Tuple[int, ...],\n pre: Optional[Tuple[str, int]],\n post: Optional[Tuple[str, int]],\n dev: Optional[Tuple[str, int]],\n local: Optional[Tuple[SubLocalType]],\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 29, "name": "_index_to_label", "kind": "def", "category": "function", "info": "def _index_to_label(index):\n \"\"\"Convert a pandas index or multiindex to an axis label.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return \"-\".join(map(to_utf8, index.names))\n else:\n return index.name\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 37, "name": "_index_to_ticklabels", "kind": "def", "category": "function", "info": "def _index_to_ticklabels(index):\n \"\"\"Convert a pandas index or multiindex into ticklabels.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return [\"-\".join(map(to_utf8, i)) for i in index.values]\n else:\n return index.values\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 45, "name": "_convert_colors", "kind": "def", "category": "function", "info": "def _convert_colors(colors):\n \"\"\"Convert either a list of colors or nested lists of colors to RGB.\"\"\"\n to_rgb = mpl.colors.to_rgb\n\n try:\n to_rgb(colors[0])\n # If this works, there is only one level of colors\n return list(map(to_rgb, colors))\n except ValueError:\n # If we get here, we have nested lists\n return [list(map(to_rgb, l)) for l in colors]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 50, "name": "to_rgb", "kind": "ref", "category": "function", "info": " to_rgb(colors[0])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 58, "name": "_matrix_mask", "kind": "def", "category": "function", "info": "def _matrix_mask(data, mask):\n \"\"\"Ensure that data and mask are compatible and add missing values.\n\n Values will be plotted for cells where ``mask`` is ``False``.\n\n ``data`` is expected to be a DataFrame; ``mask`` can be an array or\n a DataFrame.\n\n \"\"\"\n if mask is None:\n mask = np.zeros(data.shape, bool)\n\n if isinstance(mask, np.ndarray):\n # 
For array masks, ensure that shape matches data then convert\n        if mask.shape != data.shape:\n            raise ValueError(\"Mask must have the same shape as data.\")\n\n        mask = pd.DataFrame(mask,\n                            index=data.index,\n                            columns=data.columns,\n                            dtype=bool)\n\n    elif isinstance(mask, pd.DataFrame):\n        # For DataFrame masks, ensure that semantic labels match data\n        # (parenthesize so a mismatch in either index or columns raises)\n        if not (mask.index.equals(data.index)\n                and mask.columns.equals(data.columns)):\n            err = \"Mask must have the same index and columns as data.\"\n            raise ValueError(err)\n\n    # Add any cells with missing data to the mask\n    # This works around an issue where `plt.pcolormesh` doesn't represent\n    # missing data properly\n    mask = mask | pd.isnull(data)\n\n    return mask\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 82, "name": "equals", "kind": "ref", "category": "function", "info": "        if not (mask.index.equals(data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 83, "name": "equals", "kind": "ref", "category": "function", "info": "                and mask.columns.equals(data.columns)):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 95, "name": "_HeatMapper", "kind": "def", "category": "class", "info": "__init__\t_determine_cmap_params\t_annotate_heatmap\t_skip_ticks\t_auto_ticks\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 111, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": "        mask = _matrix_mask(data, mask)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 113, "name": "masked_where", "kind": "ref", "category": "function", "info": "        plot_data = np.ma.masked_where(np.asarray(mask), plot_data)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 119, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 121, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 128, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 130, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 139, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            self.xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 141, "name": "_skip_ticks", "kind": "ref", "category": 
"function", "info": " self.xticks, self.xticklabels = self._skip_ticks(xticklabels,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 149, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " self.yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 151, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " self.yticks, self.yticklabels = self._skip_ticks(yticklabels,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 155, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " xlabel = _index_to_label(data.columns)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 156, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " ylabel = _index_to_label(data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 161, "name": "_determine_cmap_params", "kind": "ref", "category": "function", "info": " self._determine_cmap_params(plot_data, vmin, vmax,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 190, "name": "_determine_cmap_params", "kind": "def", "category": "function", "info": " def _determine_cmap_params(self, plot_data, vmin, vmax,\n cmap, center, robust):\n \"\"\"Use some heuristics to set good defaults for colorbar and range.\"\"\"\n\n # plot_data is a np.ma.array instance\n calc_data = plot_data.astype(float).filled(np.nan)\n if vmin is None:\n if robust:\n vmin = np.nanpercentile(calc_data, 2)\n else:\n vmin = np.nanmin(calc_data)\n if vmax is None:\n if robust:\n vmax = np.nanpercentile(calc_data, 98)\n else:\n vmax = np.nanmax(calc_data)\n self.vmin, self.vmax = vmin, vmax\n\n # Choose default colormaps if not provided\n if cmap is None:\n if center is None:\n self.cmap = cm.rocket\n else:\n self.cmap = cm.icefire\n elif isinstance(cmap, str):\n self.cmap = mpl.cm.get_cmap(cmap)\n elif isinstance(cmap, list):\n self.cmap = mpl.colors.ListedColormap(cmap)\n else:\n self.cmap = cmap\n\n # Recenter a divergent colormap\n if center is not None:\n\n # Copy bad values\n # in mpl<3.2 only masked values are honored with \"bad\" color spec\n # (see https://github.com/matplotlib/matplotlib/pull/14257)\n bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n\n # under/over values are set for sure when cmap extremes\n # do not map to the same color as +-inf\n under = self.cmap(-np.inf)\n over = self.cmap(np.inf)\n under_set = under != self.cmap(0)\n over_set = over != self.cmap(self.cmap.N - 1)\n\n vrange = max(vmax - center, center - vmin)\n normlize = mpl.colors.Normalize(center - vrange, center + vrange)\n cmin, cmax = normlize([vmin, vmax])\n cc = np.linspace(cmin, cmax, 256)\n self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n self.cmap.set_bad(bad)\n if under_set:\n self.cmap.set_under(under)\n if over_set:\n self.cmap.set_over(over)\n\n def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, 
np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 195, "name": "astype", 
"kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 195, "name": "filled", "kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 217, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 227, "name": "cmap", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 227, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 231, "name": "cmap", "kind": "ref", "category": "function", "info": " under = self.cmap(-np.inf)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 232, "name": "cmap", "kind": "ref", "category": "function", "info": " over = self.cmap(np.inf)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 233, "name": "cmap", "kind": "ref", "category": "function", "info": " under_set = under != self.cmap(0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 234, "name": "cmap", "kind": "ref", "category": "function", "info": " over_set = over != self.cmap(self.cmap.N - 1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 238, "name": "normlize", "kind": "ref", "category": "function", "info": " cmin, cmax = normlize([vmin, vmax])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 240, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 240, "name": "cmap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "set_bad", "kind": "ref", "category": "function", "info": " self.cmap.set_bad(bad)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 243, "name": "set_under", "kind": "ref", "category": "function", "info": " self.cmap.set_under(under)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 245, "name": "set_over", "kind": "ref", "category": "function", "info": " self.cmap.set_over(over)\n"}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 247, "name": "_annotate_heatmap", "kind": "def", "category": "function", "info": " def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, 
rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 256, "name": "relative_luminance", "kind": "ref", "category": "function", "info": " lum = relative_luminance(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 263, "name": "_skip_ticks", "kind": "def", "category": "function", "info": " def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, 
ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 276, "name": "_auto_ticks", "kind": "def", "category": "function", "info": " def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 278, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = ax.figure.dpi_scale_trans.inverted()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 279, "name": "transformed", "kind": "ref", "category": "function", "info": " bbox = ax.get_window_extent().transformed(transform)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 282, "name": 
"set_ticks", "kind": "ref", "category": "function", "info": " tick, = axis.set_ticks([0])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 283, "name": "get_size", "kind": "ref", "category": "function", "info": " fontsize = tick.label1.get_size()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 289, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " ticks, labels = self._skip_ticks(labels, tick_every)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 295, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 310, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 323, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 328, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 333, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(xticklabels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 334, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 338, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 340, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 342, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 350, "name": "_annotate_heatmap", "kind": "ref", "category": "function", "info": " self._annotate_heatmap(ax, mesh)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 353, "name": "heatmap", "kind": "def", "category": "function", "info": "def heatmap(\n data, *,\n vmin=None, vmax=None, cmap=None, center=None, robust=False,\n annot=None, fmt=\".2g\", annot_kws=None,\n linewidths=0, linecolor=\"white\",\n cbar=True, cbar_kws=None, 
cbar_ax=None,\n square=False, xticklabels=\"auto\", yticklabels=\"auto\",\n mask=None, ax=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 541, "name": "_HeatMapper", "kind": "ref", "category": "function", "info": " plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 553, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect(\"equal\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 558, "name": "_DendrogramPlotter", "kind": "def", "category": "class", "info": "__init__\t_calculate_linkage_scipy\t_calculate_linkage_fastcluster\tcalculated_linkage\tcalculate_dendrogram\treordered_ind\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 593, "name": "calculate_dendrogram", "kind": "ref", "category": "function", "info": " self.dendrogram = self.calculate_dendrogram()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 599, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " ticklabels = _index_to_ticklabels(self.data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 607, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.ylabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 615, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.xlabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 624, "name": "_calculate_linkage_scipy", "kind": "def", "category": "function", "info": " def _calculate_linkage_scipy(self):\n linkage = hierarchy.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 629, "name": "_calculate_linkage_fastcluster", "kind": "def", "category": "function", "info": " def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except 
ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 638, "name": "linkage_vector", "kind": "ref", "category": "function", "info": " return fastcluster.linkage_vector(self.array,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 647, "name": "calculated_linkage", "kind": "def", "category": "function", "info": " def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 650, "name": "_calculate_linkage_fastcluster", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_fastcluster()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 657, "name": "_calculate_linkage_scipy", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_scipy()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 659, "name": "calculate_dendrogram", "kind": "def", "category": "function", "info": " def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n 
-------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 676, "name": "reordered_ind", "kind": "def", "category": "function", "info": " def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n 
ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 700, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 705, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 709, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, number_of_leaves * 10)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 710, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 712, "name": "invert_xaxis", "kind": "ref", "category": "function", "info": " ax.invert_xaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 713, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 717, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, number_of_leaves * 10)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 718, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 720, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, bottom=True, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 724, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(self.xticklabels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 725, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 728, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 730, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 732, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 782, "name": "_DendrogramPlotter", "kind": "ref", "category": "function", "info": " plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 791, "name": "ClusterGrid", "kind": "def", "category": "class", "info": "__init__\t_preprocess_colors\tformat_data\tz_score\tstandard_scale\tdim_ratios\tcolor_list_to_matrix_and_cmap\tplot_dendrograms\tplot_colors\tplot_matrix\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 805, "name": "format_data", "kind": "ref", "category": "function", "info": " self.data2d = self.format_data(self.data, pivot_kws, z_score,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 808, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": " self.mask = _matrix_mask(self.data2d, mask)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 813, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, row_colors, axis=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 815, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, col_colors, axis=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 827, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " width_ratios = self.dim_ratios(self.row_colors,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 830, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " height_ratios = self.dim_ratios(self.col_colors,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 841, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 842, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])\n"}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 843, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 844, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 850, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_colors = self._figure.add_subplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 853, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_colors = self._figure.add_subplot(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 856, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 862, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 869, "name": "_preprocess_colors", "kind": "def", "category": "function", "info": " def _preprocess_colors(self, data, colors, axis):\n \"\"\"Preprocess {row/col}_colors to extract labels and convert colors.\"\"\"\n labels = None\n\n if colors is not None:\n if isinstance(colors, (pd.DataFrame, pd.Series)):\n\n # If data is unindexed, raise\n if (not hasattr(data, \"index\") and axis == 0) or (\n not hasattr(data, \"columns\") and axis == 1\n ):\n axis_name = \"col\" if axis else \"row\"\n msg = (f\"{axis_name}_colors indices can't be matched with data \"\n f\"indices. Provide {axis_name}_colors as a non-indexed \"\n \"datatype, e.g. 
by using `.to_numpy()`\")\n                    raise TypeError(msg)\n\n                # Ensure colors match data indices\n                if axis == 0:\n                    colors = colors.reindex(data.index)\n                else:\n                    colors = colors.reindex(data.columns)\n\n                # Replace na's with white color\n                # TODO We should set these to transparent instead\n                colors = colors.astype(object).fillna('white')\n\n                # Extract color values and labels from frame/series\n                if isinstance(colors, pd.DataFrame):\n                    labels = list(colors.columns)\n                    colors = colors.T.values\n                else:\n                    if colors.name is None:\n                        labels = [\"\"]\n                    else:\n                        labels = [colors.name]\n                    colors = colors.values\n\n            colors = _convert_colors(colors)\n\n        return colors, labels\n\n    def format_data(self, data, pivot_kws, z_score=None,\n                    standard_scale=None):\n        \"\"\"Extract variables from data or use directly.\"\"\"\n\n        # Either the data is already in 2d matrix format, or need to do a pivot\n        if pivot_kws is not None:\n            data2d = data.pivot(**pivot_kws)\n        else:\n            data2d = data\n\n        if z_score is not None and standard_scale is not None:\n            raise ValueError(\n                'Cannot perform both z-scoring and standard-scaling on data')\n\n        if z_score is not None:\n            data2d = self.z_score(data2d, z_score)\n        if standard_scale is not None:\n            data2d = self.standard_scale(data2d, standard_scale)\n        return data2d\n\n    @staticmethod\n    def z_score(data2d, axis=1):\n        \"\"\"Standardize the mean and variance of the data axis\n\n        Parameters\n        ----------\n        data2d : pandas.DataFrame\n            Data to normalize\n        axis : int\n            Which axis to normalize across. If 0, normalize across rows, if 1,\n            normalize across columns.\n\n        Returns\n        -------\n        normalized : pandas.DataFrame\n            Normalized data with a mean of 0 and variance of 1 across the\n            specified axis.\n        \"\"\"\n        if axis == 1:\n            z_scored = data2d\n        else:\n            z_scored = data2d.T\n\n        z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n        if axis == 1:\n            return z_scored\n        else:\n            return z_scored.T\n\n    @staticmethod\n    def standard_scale(data2d, axis=1):\n        \"\"\"Divide the data by the difference between the max and min\n\n        Parameters\n        ----------\n        data2d : pandas.DataFrame\n            Data to normalize\n        axis : int\n            Which axis to normalize across. 
If 0, normalize across rows, if 1,\n            normalize across columns.\n\n        Returns\n        -------\n        standardized : pandas.DataFrame\n            Standardized data with a minimum of 0 and maximum of 1 across the\n            specified axis.\n\n        \"\"\"\n        # Normalize these values to range from 0 to 1\n        if axis == 1:\n            standardized = data2d\n        else:\n            standardized = data2d.T\n\n        subtract = standardized.min()\n        standardized = (standardized - subtract) / (\n            standardized.max() - standardized.min())\n\n        if axis == 1:\n            return standardized\n        else:\n            return standardized.T\n\n    def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n        \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n        ratios = [dendrogram_ratio]\n\n        if colors is not None:\n            # Colors are encoded as rgb, so there is an extra dimension\n            if np.ndim(colors) > 2:\n                n_colors = len(colors)\n            else:\n                n_colors = 1\n\n            ratios += [n_colors * colors_ratio]\n\n        # Add the ratio for the heatmap itself\n        ratios.append(1 - sum(ratios))\n\n        return ratios\n\n    @staticmethod\n    def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n        \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n        These arguments can now be plotted using heatmap(matrix, cmap)\n        and the provided colors will be plotted.\n\n        Parameters\n        ----------\n        colors : list of matplotlib colors\n            Colors to label the rows or columns of a dataframe.\n        ind : list of ints\n            Ordering of the rows or columns, to reorder the original colors\n            by the clustered dendrogram order\n        axis : int\n            Which axis this is labeling\n\n        Returns\n        -------\n        matrix : numpy.array\n            A numpy array of integer values, where each indexes into the cmap\n        cmap : matplotlib.colors.ListedColormap\n\n        \"\"\"\n        try:\n            mpl.colors.to_rgb(colors[0])\n        except ValueError:\n            # We have a 2D color structure\n            m, n = len(colors), len(colors[0])\n            if not all(len(c) == n for c in colors[1:]):\n                raise ValueError(\"Multiple side color vectors must have same size\")\n        else:\n            # We have one vector of colors\n            m, n = 1, len(colors)\n            colors = [colors]\n\n        # Map from unique colors to colormap index value\n        unique_colors = {}\n        matrix = np.zeros((m, n), int)\n        for i, inner in enumerate(colors):\n            for j, color in enumerate(inner):\n                idx = unique_colors.setdefault(color, len(unique_colors))\n                matrix[i, j] = idx\n\n        # Reorder for clustering and transpose for axis\n        matrix = matrix[:, ind]\n        if axis == 0:\n            matrix = matrix.T\n\n        cmap = mpl.colors.ListedColormap(list(unique_colors))\n        return matrix, cmap\n\n    def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n                         row_linkage, col_linkage, tree_kws):\n        # Plot the row dendrogram\n        if row_cluster:\n            self.dendrogram_row = dendrogram(\n                self.data2d, metric=metric, method=method, label=False, axis=0,\n                ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n                tree_kws=tree_kws\n            )\n        else:\n            self.ax_row_dendrogram.set_xticks([])\n            self.ax_row_dendrogram.set_yticks([])\n        # Plot the column dendrogram\n        if col_cluster:\n            self.dendrogram_col = dendrogram(\n                self.data2d, metric=metric, method=method, label=False,\n                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n                tree_kws=tree_kws\n            )\n        else:\n            self.ax_col_dendrogram.set_xticks([])\n            self.ax_col_dendrogram.set_yticks([])\n        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n    def plot_colors(self, xind, yind, **kws):\n        \"\"\"Plots color labels between the dendrogram and the heatmap\n\n        Parameters\n        ----------\n        heatmap_kws : dict\n            Keyword arguments for 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 888, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.index)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 890, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.columns)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 894, "name": "astype", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 894, "name": "fillna", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 907, "name": "_convert_colors", "kind": "ref", "category": "function", "info": " colors = _convert_colors(colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 911, "name": "format_data", "kind": "def", "category": "function", "info": " def format_data(self, data, pivot_kws, z_score=None,\n standard_scale=None):\n \"\"\"Extract variables from data or use directly.\"\"\"\n\n # Either the data is already in 2d matrix format, or need to do a pivot\n if pivot_kws is not None:\n data2d = data.pivot(**pivot_kws)\n else:\n data2d = data\n\n if z_score is not None and standard_scale is not None:\n raise ValueError(\n 'Cannot perform both z-scoring and standard-scaling on data')\n\n if z_score is not None:\n data2d = self.z_score(data2d, z_score)\n if standard_scale is not None:\n data2d = self.standard_scale(data2d, standard_scale)\n return data2d\n\n @staticmethod\n def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n 
self.ax_row_dendrogram.set_yticks([])\n        # Plot the column dendrogram\n        if col_cluster:\n            self.dendrogram_col = dendrogram(\n                self.data2d, metric=metric, method=method, label=False,\n                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n                tree_kws=tree_kws\n            )\n        else:\n            self.ax_col_dendrogram.set_xticks([])\n            self.ax_col_dendrogram.set_yticks([])\n        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n    def plot_colors(self, xind, yind, **kws):\n        \"\"\"Plots color labels between the dendrogram and the heatmap\n\n        Parameters\n        ----------\n        heatmap_kws : dict\n            Keyword arguments for heatmap\n\n        \"\"\"\n        # Remove any custom colormap and centering\n        # TODO this code has consistently caused problems when we\n        # have missed kwargs that need to be excluded that it might\n        # be better to rewrite *in*clusively.\n        kws = kws.copy()\n        kws.pop('cmap', None)\n        kws.pop('norm', None)\n        kws.pop('center', None)\n        kws.pop('annot', None)\n        kws.pop('vmin', None)\n        kws.pop('vmax', None)\n        kws.pop('robust', None)\n        kws.pop('xticklabels', None)\n        kws.pop('yticklabels', None)\n\n        # Plot the row colors\n        if self.row_colors is not None:\n            matrix, cmap = self.color_list_to_matrix_and_cmap(\n                self.row_colors, yind, axis=0)\n\n            # Get row_color labels\n            if self.row_color_labels is not None:\n                row_color_labels = self.row_color_labels\n            else:\n                row_color_labels = False\n\n            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n                    xticklabels=row_color_labels, yticklabels=False, **kws)\n\n            # Adjust rotation of labels\n            if row_color_labels is not False:\n                plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n        else:\n            despine(self.ax_row_colors, left=True, bottom=True)\n\n        # Plot the column colors\n        if self.col_colors is not None:\n            matrix, cmap = self.color_list_to_matrix_and_cmap(\n                self.col_colors, xind, axis=1)\n\n            # Get col_color labels\n            if self.col_color_labels is not None:\n                col_color_labels = self.col_color_labels\n            else:\n                col_color_labels = False\n\n            heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n                    xticklabels=False, yticklabels=col_color_labels, **kws)\n\n            # Adjust rotation of labels, place on right side\n            if col_color_labels is not False:\n                self.ax_col_colors.yaxis.tick_right()\n                plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n        else:\n            despine(self.ax_col_colors, left=True, bottom=True)\n\n    def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n        self.data2d = self.data2d.iloc[yind, xind]\n        self.mask = self.mask.iloc[yind, xind]\n\n        # Try to reorganize specified tick labels, if provided\n        xtl = kws.pop(\"xticklabels\", \"auto\")\n        try:\n            xtl = np.asarray(xtl)[xind]\n        except (TypeError, IndexError):\n            pass\n        ytl = kws.pop(\"yticklabels\", \"auto\")\n        try:\n            ytl = np.asarray(ytl)[yind]\n        except (TypeError, IndexError):\n            pass\n\n        # Reorganize the annotations to match the heatmap\n        annot = kws.pop(\"annot\", None)\n        if annot is None or annot is False:\n            pass\n        else:\n            if isinstance(annot, bool):\n                annot_data = self.data2d\n            else:\n                annot_data = np.asarray(annot)\n                if annot_data.shape != self.data2d.shape:\n                    err = \"`data` and `annot` must have same shape.\"\n                    raise ValueError(err)\n                annot_data = annot_data[yind][:, xind]\n            annot = annot_data\n\n        # Setting ax_cbar=None in clustermap call implies no colorbar\n        kws.setdefault(\"cbar\", self.ax_cbar is not None)\n        heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n                cbar_kws=colorbar_kws, mask=self.mask,\n                xticklabels=xtl, yticklabels=ytl, annot=annot, 
**kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 926, "name": "z_score", "kind": "ref", "category": "function", "info": " data2d = self.z_score(data2d, z_score)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 928, "name": "standard_scale", "kind": "ref", "category": "function", "info": " data2d = self.standard_scale(data2d, standard_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 932, "name": "z_score", "kind": "def", "category": "function", "info": " def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 962, "name": "standard_scale", "kind": "def", "category": "function", "info": " def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n 
matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", 
\"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 995, "name": "dim_ratios", "kind": "def", "category": "function", "info": " def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the 
original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n 
xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1014, "name": "color_list_to_matrix_and_cmap", "kind": "def", "category": "function", "info": " def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors 
will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get 
col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1038, "name": "to_rgb", "kind": "ref", "category": "function", "info": " mpl.colors.to_rgb(colors[0])\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1062, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(list(unique_colors))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1065, "name": "plot_dendrograms", "kind": "def", "category": "function", "info": " def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = 
np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1075, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_xticks([])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1076, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_yticks([])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1085, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_xticks([])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1086, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_yticks([])\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1087, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1088, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1090, "name": "plot_colors", "kind": "def", "category": "function", "info": " def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting 
ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1116, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1125, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1130, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1132, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_row_colors, left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1136, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1145, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n"}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1150, "name": "tick_right", "kind": "ref", "category": "function", "info": " self.ax_col_colors.yaxis.tick_right()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1151, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1153, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_col_colors, left=True, bottom=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1155, "name": "plot_matrix", "kind": "def", "category": "function", "info": " def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = 
np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1188, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1192, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1193, "name": "get_rotation", "kind": "ref", "category": "function", "info": " ytl_rot = None if not ytl else ytl[0].get_rotation()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1194, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1195, "name": "set_label_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_label_position('right')\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1197, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1207, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1209, "name": "set_axis_on", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_on()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1210, "name": "set_position", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_position(self.cbar_pos)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1224, "name": "plot_dendrograms", "kind": "ref", "category": "function", "info": " self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1236, "name": "plot_colors", "kind": "ref", "category": "function", "info": " self.plot_colors(xind, yind, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1237, "name": "plot_matrix", "kind": "ref", "category": "function", "info": " self.plot_matrix(colorbar_kws, xind, yind, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1241, "name": "clustermap", "kind": "def", 
"category": "function", "info": "def clustermap(\n data, *,\n pivot_kws=None, method='average', metric='euclidean',\n z_score=None, standard_scale=None, figsize=(10, 10),\n cbar_kws=None, row_cluster=True, col_cluster=True,\n row_linkage=None, col_linkage=None,\n row_colors=None, col_colors=None, mask=None,\n dendrogram_ratio=.2, colors_ratio=0.03,\n cbar_pos=(.02, .8, .05, .18), tree_kws=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1409, "name": "ClusterGrid", "kind": "ref", "category": "function", "info": " plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 8, "name": "palplot", "kind": "def", "category": "function", "info": "def palplot(pal, size=1):\n \"\"\"Plot the values in a color palette as a horizontal array.\n\n Parameters\n ----------\n pal : sequence of matplotlib colors\n colors, i.e. as returned by seaborn.color_palette()\n size :\n scaling factor for size of plot\n\n \"\"\"\n n = len(pal)\n f, ax = plt.subplots(1, 1, figsize=(n * size, size))\n ax.imshow(np.arange(n).reshape(1, n),\n cmap=mpl.colors.ListedColormap(list(pal)),\n interpolation=\"nearest\", aspect=\"auto\")\n ax.set_xticks(np.arange(n) - .5)\n ax.set_yticks([-.5, .5])\n # Ensure nice border between colors\n ax.set_xticklabels([\"\" for _ in range(n)])\n # The proper way to set no ticks\n ax.yaxis.set_major_locator(ticker.NullLocator())\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 22, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap=mpl.colors.ListedColormap(list(pal)),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 24, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(n) - .5)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 25, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks([-.5, .5])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 27, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels([\"\" for _ in range(n)])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 29, "name": "set_major_locator", "kind": "ref", "category": "function", "info": " ax.yaxis.set_major_locator(ticker.NullLocator())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 32, "name": "dogplot", "kind": "def", "category": "function", "info": "def dogplot(*_, **__):\n \"\"\"Who's a good boy?\"\"\"\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen\n from io import BytesIO\n\n url = \"https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png\"\n pic = np.random.randint(2, 7)\n data = BytesIO(urlopen(url.format(pic)).read())\n img = plt.imread(data)\n f, ax = plt.subplots(figsize=(5, 5), dpi=100)\n f.subplots_adjust(0, 0, 1, 1)\n ax.imshow(img)\n 
ax.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 41, "name": "randint", "kind": "ref", "category": "function", "info": "    pic = np.random.randint(2, 7)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 47, "name": "set_axis_off", "kind": "ref", "category": "function", "info": "    ax.set_axis_off()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 59, "name": "_ColorPalette", "kind": "def", "category": "class", "info": "__enter__\t__exit__\tas_hex\t_repr_html_"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 61, "name": "__enter__", "kind": "def", "category": "function", "info": "    def __enter__(self):\n        \"\"\"Open the context.\"\"\"\n        from .rcmod import set_palette\n        self._orig_palette = color_palette()\n        set_palette(self)\n        return self\n\n    def __exit__(self, *args):\n        \"\"\"Close the context.\"\"\"\n        from .rcmod import set_palette\n        set_palette(self._orig_palette)\n\n    def as_hex(self):\n        \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n        hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n        return _ColorPalette(hex)\n\n    def _repr_html_(self):\n        \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n        s = 55\n        n = len(self)\n        html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n        for i, c in enumerate(self.as_hex()):\n            html += (\n                f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n                'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n            )\n        html += '</svg>'\n        return html\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 64, "name": "color_palette", "kind": "ref", "category": "function", "info": "        self._orig_palette = color_palette()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 65, "name": "set_palette", "kind": "ref", "category": "function", "info": "        set_palette(self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 68, "name": "__exit__", "kind": "def", "category": "function", "info": "    def __exit__(self, *args):\n        \"\"\"Close the context.\"\"\"\n        from .rcmod import set_palette\n        set_palette(self._orig_palette)\n\n    def as_hex(self):\n        \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n        hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n        return _ColorPalette(hex)\n\n    def _repr_html_(self):\n        \"\"\"Rich display of the 
color palette in an HTML frontend.\"\"\"\n        s = 55\n        n = len(self)\n        html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n        for i, c in enumerate(self.as_hex()):\n            html += (\n                f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n                'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n            )\n        html += '</svg>'\n        return html\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 75, "name": "rgb2hex", "kind": "ref", "category": "function", "info": "        hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 76, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": "        return _ColorPalette(hex)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 78, "name": "_repr_html_", "kind": "def", "category": "function", "info": "    def _repr_html_(self):\n        \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n        s = 55\n        n = len(self)\n        html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n        for i, c in enumerate(self.as_hex()):\n            html += (\n                f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n                'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n            )\n        html += '</svg>'\n        return html\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 83, "name": "as_hex", "kind": "ref", "category": "function", "info": "        for i, c in enumerate(self.as_hex()):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 92, "name": "color_palette", "kind": "def", "category": "function", "info": "def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):\n    \"\"\"Return a list of colors or continuous colormap defining a palette.\n\n    Possible ``palette`` values include:\n        - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)\n        - Name of matplotlib colormap\n        - 'husl' or 'hls'\n        - 'ch:<cubehelix arguments>'\n        - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>'\n        - A sequence of colors in any format matplotlib accepts\n\n    Calling this function with ``palette=None`` will return the current\n    matplotlib color cycle.\n\n    This function can also be used in a ``with`` statement to temporarily\n    set the color cycle for a plot or set of plots.\n\n    See the :ref:`tutorial <palette_tutorial>` for more information.\n\n    Parameters\n    ----------\n    palette : None, string, or sequence, optional\n        Name of palette or None to return current palette. If a sequence, input\n        colors are used but possibly cycled and desaturated.\n    n_colors : int, optional\n        Number of colors in the palette. If ``None``, the default will depend\n        on how ``palette`` is specified. Named palettes default to 6 colors,\n        but grabbing the current palette or passing in a list of colors will\n        not change the number of colors unless this is specified. Asking for\n        more colors than exist in the palette will cause it to cycle. Ignored\n        when ``as_cmap`` is True.\n    desat : float, optional\n        Proportion to desaturate each color by.\n    as_cmap : bool\n        If True, return a :class:`matplotlib.colors.Colormap`.\n\n    Returns\n    -------\n    list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n    See Also\n    --------\n    set_palette : Set the default color cycle for all plots.\n    set_color_codes : Reassign color codes like ``\"b\"``, ``\"g\"``, etc. to\n                      colors from one of the seaborn palettes.\n\n    Examples\n    --------\n\n    .. 
include:: ../docstrings/color_palette.rst\n\n \"\"\"\n if palette is None:\n palette = get_color_cycle()\n if n_colors is None:\n n_colors = len(palette)\n\n elif not isinstance(palette, str):\n palette = palette\n if n_colors is None:\n n_colors = len(palette)\n else:\n\n if n_colors is None:\n # Use all colors in a qualitative palette or 6 of another kind\n n_colors = QUAL_PALETTE_SIZES.get(palette, 6)\n\n if palette in SEABORN_PALETTES:\n # Named \"seaborn variant\" of matplotlib default color cycle\n palette = SEABORN_PALETTES[palette]\n\n elif palette == \"hls\":\n # Evenly spaced colors in cylindrical RGB space\n palette = hls_palette(n_colors, as_cmap=as_cmap)\n\n elif palette == \"husl\":\n # Evenly spaced colors in cylindrical Lab space\n palette = husl_palette(n_colors, as_cmap=as_cmap)\n\n elif palette.lower() == \"jet\":\n # Paternalism\n raise ValueError(\"No.\")\n\n elif palette.startswith(\"ch:\"):\n # Cubehelix palette with params specified in string\n args, kwargs = _parse_cubehelix_args(palette)\n palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n\n elif palette.startswith(\"light:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"dark:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"blend:\"):\n # blend palette between colors specified in string\n _, colors = palette.split(\":\")\n colors = colors.split(\",\")\n palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n\n else:\n try:\n # Perhaps a named matplotlib colormap?\n palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n except ValueError:\n raise ValueError(f\"{palette} is not a valid palette name\")\n\n if desat is not None:\n palette = [desaturate(c, desat) for c in palette]\n\n if not as_cmap:\n\n # Always return as many colors as we asked for\n pal_cycle = cycle(palette)\n palette = [next(pal_cycle) for _ in range(n_colors)]\n\n # Always return in r, g, b tuple format\n try:\n palette = map(mpl.colors.colorConverter.to_rgb, palette)\n palette = _ColorPalette(palette)\n except ValueError:\n raise ValueError(f\"Could not generate a palette for {palette}\")\n\n return palette\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 145, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " palette = get_color_cycle()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 165, "name": "hls_palette", "kind": "ref", "category": "function", "info": " palette = hls_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 169, "name": "husl_palette", "kind": "ref", "category": "function", "info": " palette = husl_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 177, "name": "_parse_cubehelix_args", "kind": "ref", "category": "function", "info": " 
args, kwargs = _parse_cubehelix_args(palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 178, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 186, "name": "light_palette", "kind": "ref", "category": "function", "info": " palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 194, "name": "dark_palette", "kind": "ref", "category": "function", "info": " palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 200, "name": "blend_palette", "kind": "ref", "category": "function", "info": " palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 205, "name": "mpl_palette", "kind": "ref", "category": "function", "info": " palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 210, "name": "desaturate", "kind": "ref", "category": "function", "info": " palette = [desaturate(c, desat) for c in palette]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 221, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " palette = _ColorPalette(palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 228, "name": "hls_palette", "kind": "def", "category": "function", "info": "def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa\n \"\"\"Get a set of evenly spaced colors in HLS hue space.\n\n h, l, and s should be between 0 and 1\n\n Parameters\n ----------\n\n n_colors : int\n number of colors in the palette\n h : float\n first hue\n l : float\n lightness\n s : float\n saturation\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n husl_palette : Make a palette using evenly spaced hues in the HUSL system.\n\n Examples\n --------\n\n Create a palette of 10 colors with the default parameters:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.hls_palette(10))\n\n Create a palette of 10 colors that begins at a different hue value:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.hls_palette(10, h=.5))\n\n Create a palette of 10 colors that are darker than the default:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.hls_palette(10, l=.4))\n\n Create a palette of 10 colors that are less saturated than the default:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.hls_palette(10, s=.4))\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues -= hues.astype(int)\n palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hls\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 291, "name": "astype", "kind": "ref", "category": "function", "info": " hues -= hues.astype(int)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 294, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hls\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 296, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 299, "name": "husl_palette", "kind": "def", "category": "function", "info": "def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa\n \"\"\"Get a set of evenly spaced colors in HUSL hue space.\n\n h, s, and l should be between 0 and 1\n\n Parameters\n ----------\n\n n_colors : int\n number of colors in the palette\n h : float\n first hue\n s : float\n saturation\n l : float\n lightness\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n hls_palette : Make a palette using evenly spaced circular hues in the\n HSL system.\n\n Examples\n --------\n\n Create a palette of 10 colors with the default parameters:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.husl_palette(10))\n\n Create a palette of 10 colors that begins at a different hue value:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.husl_palette(10, h=.5))\n\n Create a palette of 10 colors that are darker than the default:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.husl_palette(10, l=.4))\n\n Create a palette of 10 colors that are less saturated than the default:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.husl_palette(10, s=.4))\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues *= 359\n s *= 99\n l *= 99 # noqa\n palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hsl\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 366, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 368, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hsl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 370, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 373, "name": "mpl_palette", "kind": "def", "category": "function", "info": "def mpl_palette(name, n_colors=6, as_cmap=False):\n \"\"\"Return discrete colors from a matplotlib palette.\n\n Note that this handles the qualitative colorbrewer palettes\n properly, although if you ask for more colors than a particular\n qualitative palette can provide you will get fewer than you are\n expecting. In contrast, asking for qualitative color brewer palettes\n using :func:`color_palette` will return the expected number of colors,\n but they will cycle.\n\n If you are using the IPython notebook, you can also use the function\n :func:`choose_colorbrewer_palette` to interactively select palettes.\n\n Parameters\n ----------\n name : string\n Name of the palette. This should be a named matplotlib colormap.\n n_colors : int\n Number of discrete colors in the palette.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n Examples\n --------\n\n Create a qualitative colorbrewer palette with 8 colors:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.mpl_palette(\"Set2\", 8))\n\n Create a sequential colorbrewer palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.mpl_palette(\"Blues\"))\n\n Create a diverging palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.mpl_palette(\"seismic\", 8))\n\n Create a \"dark\" sequential palette:\n\n .. 
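hls_palette() and husl_palette() above differ mainly in perceived brightness: HLS hues vary widely while HUSL roughly equalizes lightness. A small sketch that makes this visible, using Rec. 709 luma weights as an assumed brightness proxy:

import seaborn as sns

def luma(rgb):
    # Approximate perceived brightness from an (r, g, b) tuple (Rec. 709 weights).
    r, g, b = rgb
    return .2126 * r + .7152 * g + .0722 * b

print([round(luma(c), 2) for c in sns.hls_palette(6)])   # varies widely
print([round(luma(c), 2) for c in sns.husl_palette(6)])  # nearly constant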
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.mpl_palette(\"GnBu_d\"))\n\n \"\"\"\n if name.endswith(\"_d\"):\n sub_name = name[:-2]\n if sub_name.endswith(\"_r\"):\n reverse = True\n sub_name = sub_name[:-2]\n else:\n reverse = False\n pal = color_palette(sub_name, 2) + [\"#333333\"]\n if reverse:\n pal = pal[::-1]\n cmap = blend_palette(pal, n_colors, as_cmap=True)\n else:\n cmap = mpl.cm.get_cmap(name)\n\n if name in MPL_QUAL_PALS:\n bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]\n else:\n bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]\n palette = list(map(tuple, cmap(bins)[:, :3]))\n\n if as_cmap:\n return cmap\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 437, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal = color_palette(sub_name, 2) + [\"#333333\"]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 440, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(pal, n_colors, as_cmap=True)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 442, "name": "get_cmap", "kind": "ref", "category": "function", "info": " cmap = mpl.cm.get_cmap(name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 448, "name": "cmap", "kind": "ref", "category": "function", "info": " palette = list(map(tuple, cmap(bins)[:, :3]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 453, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 456, "name": "_color_to_rgb", "kind": "def", "category": "function", "info": "def _color_to_rgb(color, input):\n \"\"\"Add some more flexibility to color choices.\"\"\"\n if input == \"hls\":\n color = colorsys.hls_to_rgb(*color)\n elif input == \"husl\":\n color = husl.husl_to_rgb(*color)\n color = tuple(np.clip(color, 0, 1))\n elif input == \"xkcd\":\n color = xkcd_rgb[color]\n\n return mpl.colors.to_rgb(color)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 461, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " color = husl.husl_to_rgb(*color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 466, "name": "to_rgb", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgb(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 469, "name": "dark_palette", "kind": "def", "category": "function", "info": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from dark to ``color``.\n\n This kind of palette is good for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in 
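A distilled sketch of the bin placement mpl_palette() uses above: qualitative maps are sampled from the start of [0, 1], while any other map is sampled at interior points so both extremes are skipped. The colormap name here is just an example:

import numpy as np
import matplotlib as mpl

n_colors = 6
cmap = mpl.cm.get_cmap("Blues")  # same accessor the code above uses (deprecated in newer matplotlib)
bins = np.linspace(0, 1, n_colors + 2)[1:-1]        # interior points only
palette = [tuple(rgba[:3]) for rgba in cmap(bins)]  # drop the alpha channel
print(palette)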
a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_dark_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex, rgb-tuple, or html color name\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n input : {'rgb', 'hls', 'husl', 'xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n\n Generate a palette from an HTML color:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.dark_palette(\"purple\"))\n\n Generate a palette that decreases in lightness:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.dark_palette(\"seagreen\", reverse=True))\n\n Generate a palette from an HUSL-space seed:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.dark_palette((260, 75, 60), input=\"husl\"))\n\n Generate a colormap object:\n\n .. plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.dark_palette(\"#2ecc71\", as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 15\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 542, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 543, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 545, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 547, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 550, "name": "light_palette", "kind": "def", "category": "function", "info": "def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from light to ``color``.\n\n This kind of palette is good 
for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_light_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex code, html color name, or tuple in ``input`` space.\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n input : {'rgb', 'hls', 'husl', 'xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n\n Generate a palette from an HTML color:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.light_palette(\"purple\"))\n\n Generate a palette that increases in lightness:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.light_palette(\"seagreen\", reverse=True))\n\n Generate a palette from an HUSL-space seed:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.light_palette((260, 75, 60), input=\"husl\"))\n\n Generate a colormap object:\n\n .. 
plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.light_palette(\"#2ecc71\", as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 95\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 623, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 624, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 626, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 628, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 631, "name": "diverging_palette", "kind": "def", "category": "function", "info": "def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, # noqa\n center=\"light\", as_cmap=False):\n \"\"\"Make a diverging palette between two HUSL colors.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_diverging_palette` function.\n\n Parameters\n ----------\n h_neg, h_pos : float in [0, 359]\n Anchor hues for negative and positive extents of the map.\n s : float in [0, 100], optional\n Anchor saturation for both extents of the map.\n l : float in [0, 100], optional\n Anchor lightness for both extents of the map.\n sep : int, optional\n Size of the intermediate region.\n n : int, optional\n Number of colors in the palette (if not returning a cmap)\n center : {\"light\", \"dark\"}, optional\n Whether the center of the palette is light or dark\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark values.\n light_palette : Create a sequential palette with light values.\n\n Examples\n --------\n\n Generate a blue-white-red palette:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.diverging_palette(240, 10, n=9))\n\n Generate a brighter green-white-purple palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.diverging_palette(150, 275, s=80, l=55, n=9))\n\n Generate a blue-black-red palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.diverging_palette(250, 15, s=75, l=40,\n ... n=9, center=\"dark\"))\n\n Generate a colormap object:\n\n .. 
plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.diverging_palette(220, 20, as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n \"\"\"\n palfunc = dict(dark=dark_palette, light=light_palette)[center]\n n_half = int(128 - (sep // 2))\n neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center]\n mid = midpoint * sep\n pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 703, "name": "palfunc", "kind": "ref", "category": "function", "info": " neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 704, "name": "palfunc", "kind": "ref", "category": "function", "info": " pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 707, "name": "blend_palette", "kind": "ref", "category": "function", "info": " pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 711, "name": "blend_palette", "kind": "def", "category": "function", "info": "def blend_palette(colors, n_colors=6, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a palette that blends between a list of colors.\n\n Parameters\n ----------\n colors : sequence of colors in various formats interpreted by ``input``\n hex code, html color name, or tuple in ``input`` space.\n n_colors : int, optional\n Number of colors in the palette.\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n \"\"\"\n colors = [_color_to_rgb(color, input) for color in colors]\n name = \"blend\"\n pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n if not as_cmap:\n rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n pal = _ColorPalette(map(tuple, rgb_array))\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 728, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " colors = [_color_to_rgb(color, input) for color in colors]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 730, "name": "from_list", "kind": "ref", "category": "function", "info": " pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 732, "name": "pal", "kind": "ref", "category": "function", "info": " rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 733, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " pal = 
_ColorPalette(map(tuple, rgb_array))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 737, "name": "xkcd_palette", "kind": "def", "category": "function", "info": "def xkcd_palette(colors):\n \"\"\"Make a palette with color names from the xkcd color survey.\n\n See xkcd for the full list of colors: https://xkcd.com/color/rgb/\n\n This is just a simple wrapper around the ``seaborn.xkcd_rgb`` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the ``seaborn.xkcd_rgb`` dictionary.\n\n Returns\n -------\n palette : seaborn color palette\n Returns the list of colors as RGB tuples in an object that behaves like\n other seaborn color palettes.\n\n See Also\n --------\n crayon_palette : Make a palette with Crayola crayon colors.\n\n \"\"\"\n palette = [xkcd_rgb[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 761, "name": "color_palette", "kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 764, "name": "crayon_palette", "kind": "def", "category": "function", "info": "def crayon_palette(colors):\n \"\"\"Make a palette with color names from Crayola crayons.\n\n Colors are taken from here:\n https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors\n\n This is just a simple wrapper around the ``seaborn.crayons`` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the ``seaborn.crayons`` dictionary.\n\n Returns\n -------\n palette : seaborn color palette\n Returns the list of colors as rgb tuples in an object that behaves like\n other seaborn color palettes.\n\n See Also\n --------\n xkcd_palette : Make a palette with named colors from the XKCD color survey.\n\n \"\"\"\n palette = [crayons[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 789, "name": "color_palette", "kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 792, "name": "cubehelix_palette", "kind": "def", "category": "function", "info": "def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,\n light=.85, dark=.15, reverse=False, as_cmap=False):\n \"\"\"Make a sequential palette from the cubehelix system.\n\n This produces a colormap with linearly-decreasing (or increasing)\n brightness. That means that information will be preserved if printed to\n black and white or viewed by someone who is colorblind. 
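blend_palette() above reduces to building a continuous colormap from anchor colors and sampling it at evenly spaced points; diverging_palette() builds its anchor list the same way. A self-contained sketch with placeholder anchors:

import numpy as np
import matplotlib as mpl

anchors = [(.2, .3, .7), (.95, .95, .95), (.8, .25, .2)]  # example colors only
cmap = mpl.colors.LinearSegmentedColormap.from_list("blend", anchors)
rgb = cmap(np.linspace(0, 1, 9))[:, :3]  # sample 9 colors, drop the alpha channel
palette = [tuple(c) for c in rgb]
print(palette[0], palette[-1])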
\"cubehelix\" is\n also available as a matplotlib-based palette, but this function gives the\n user more control over the look of the palette and has a different set of\n defaults.\n\n In addition to using this function, it is also possible to generate a\n cubehelix palette generally in seaborn using a string-shorthand; see the\n example below.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n start : float, 0 <= start <= 3\n The hue at the start of the helix.\n rot : float\n Rotations around the hue wheel over the range of the palette.\n gamma : float 0 <= gamma\n Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)\n colors.\n hue : float, 0 <= hue <= 1\n Saturation of the colors.\n dark : float 0 <= dark <= 1\n Intensity of the darkest color in the palette.\n light : float 0 <= light <= 1\n Intensity of the lightest color in the palette.\n reverse : bool\n If True, the palette will go from dark to light.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n choose_cubehelix_palette : Launch an interactive widget to select cubehelix\n palette parameters.\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n\n References\n ----------\n Green, D. A. (2011). \"A colour scheme for the display of astronomical\n intensity images\". Bulletin of the Astromical Society of India, Vol. 39,\n p. 289-295.\n\n Examples\n --------\n\n Generate the default palette:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.cubehelix_palette())\n\n Rotate backwards from the same starting location:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.cubehelix_palette(rot=-.4))\n\n Use a different starting point and shorter rotation:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.cubehelix_palette(start=2.8, rot=.1))\n\n Reverse the direction of the lightness ramp:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.cubehelix_palette(reverse=True))\n\n Generate a colormap object:\n\n .. plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.cubehelix_palette(as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n Use the full lightness range:\n\n .. plot::\n :context: close-figs\n\n >>> cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n Use through the :func:`color_palette` interface:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.color_palette(\"ch:2,r=.2,l=.6\"))\n\n \"\"\"\n def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 904, "name": "get_color_function", "kind": "def", "category": "function", "info": " def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 906, "name": "color", "kind": "def", "category": "function", "info": " def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, 
\"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 921, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"red\": get_color_function(-0.14861, 1.78277),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 922, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"green\": get_color_function(-0.29227, -0.90649),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 923, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"blue\": get_color_function(1.97294, 0.0),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 926, "name": "LinearSegmentedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 929, "name": "cmap", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 929, "name": "tolist", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 937, "name": "cmap", "kind": "ref", "category": "function", "info": " pal_256 = cmap(x_256)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 938, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 941, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 944, "name": "_parse_cubehelix_args", "kind": "def", "category": "function", "info": "def _parse_cubehelix_args(argstr):\n \"\"\"Turn stringified cubehelix params into args/kwargs.\"\"\"\n\n if argstr.startswith(\"ch:\"):\n argstr = argstr[3:]\n\n if argstr.endswith(\"_r\"):\n reverse = True\n argstr = argstr[:-2]\n else:\n reverse = False\n\n if not argstr:\n return [], {\"reverse\": reverse}\n\n all_args = argstr.split(\",\")\n\n args = [float(a.strip(\" \")) for a in all_args if \"=\" not in a]\n\n kwargs = [a.split(\"=\") for a in all_args if \"=\" in a]\n kwargs = {k.strip(\" \"): float(v.strip(\" \")) for k, v in kwargs}\n\n kwarg_map = dict(\n s=\"start\", r=\"rot\", g=\"gamma\",\n h=\"hue\", l=\"light\", d=\"dark\", # noqa: E741\n )\n\n kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}\n\n if reverse:\n kwargs[\"reverse\"] = True\n\n return args, kwargs\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": 
"seaborn/palettes.py", "line": 979, "name": "set_color_codes", "kind": "def", "category": "function", "info": "def set_color_codes(palette=\"deep\"):\n \"\"\"Change how matplotlib color shorthands are interpreted.\n\n Calling this will change how shorthand codes like \"b\" or \"g\"\n are interpreted by matplotlib in subsequent plots.\n\n Parameters\n ----------\n palette : {deep, muted, pastel, dark, bright, colorblind}\n Named seaborn palette to use as the source of colors.\n\n See Also\n --------\n set : Color codes can be set through the high-level seaborn style\n manager.\n set_palette : Color codes can also be set through the function that\n sets the matplotlib color cycle.\n\n Examples\n --------\n\n Map matplotlib color codes to the default seaborn palette.\n\n .. plot::\n :context: close-figs\n\n >>> import matplotlib.pyplot as plt\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.set_color_codes()\n >>> _ = plt.plot([0, 1], color=\"r\")\n\n Use a different seaborn palette.\n\n .. plot::\n :context: close-figs\n\n >>> sns.set_color_codes(\"dark\")\n >>> _ = plt.plot([0, 1], color=\"g\")\n >>> _ = plt.plot([0, 2], color=\"m\")\n\n \"\"\"\n if palette == \"reset\":\n colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75),\n (.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]\n elif not isinstance(palette, str):\n err = \"set_color_codes requires a named seaborn palette\"\n raise TypeError(err)\n elif palette in SEABORN_PALETTES:\n if not palette.endswith(\"6\"):\n palette = palette + \"6\"\n colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]\n else:\n err = f\"Cannot set colors with palette '{palette}'\"\n raise ValueError(err)\n\n for code, color in zip(\"bgrmyck\", colors):\n rgb = mpl.colors.colorConverter.to_rgb(color)\n mpl.colors.colorConverter.colors[code] = rgb\n mpl.colors.colorConverter.cache[code] = rgb\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 1035, "name": "to_rgb", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 82, "name": "set_theme", "kind": "def", "category": "function", "info": "def set_theme(context=\"notebook\", style=\"darkgrid\", palette=\"deep\",\n font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):\n \"\"\"\n Set aspects of the visual theme for all matplotlib and seaborn plots.\n\n This function changes the global defaults for all plots using the\n :ref:`matplotlib rcParams system `.\n The themeing is decomposed into several distinct sets of parameter values.\n\n The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>`\n and :doc:`color palette <../tutorial/color_palettes>` tutorials.\n\n Parameters\n ----------\n context : string or dict\n Scaling parameters, see :func:`plotting_context`.\n style : string or dict\n Axes style parameters, see :func:`axes_style`.\n palette : string or sequence\n Color palette, see :func:`color_palette`.\n font : string\n Font family, see matplotlib font manager.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) 
to the colors from this palette.\n rc : dict or None\n Dictionary of rc parameter mappings to override the above.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_theme.rst\n\n \"\"\"\n set_context(context, font_scale)\n set_style(style, rc={\"font.family\": font})\n set_palette(palette, color_codes=color_codes)\n if rc is not None:\n mpl.rcParams.update(rc)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 119, "name": "set_context", "kind": "ref", "category": "function", "info": " set_context(context, font_scale)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 120, "name": "set_style", "kind": "ref", "category": "function", "info": " set_style(style, rc={\"font.family\": font})\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 121, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(palette, color_codes=color_codes)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 132, "name": "set_theme", "kind": "ref", "category": "function", "info": " set_theme(*args, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 135, "name": "reset_defaults", "kind": "def", "category": "function", "info": "def reset_defaults():\n \"\"\"Restore all RC params to default settings.\"\"\"\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 140, "name": "reset_orig", "kind": "def", "category": "function", "info": "def reset_orig():\n \"\"\"Restore all RC params to original settings (respects custom rc).\"\"\"\n from . import _orig_rc_params\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', mpl.cbook.MatplotlibDeprecationWarning)\n mpl.rcParams.update(_orig_rc_params)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 148, "name": "axes_style", "kind": "def", "category": "function", "info": "def axes_style(style=None, rc=None):\n \"\"\"\n Get the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n :ref:`matplotlib rcParams system `.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_style`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n style : None, dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/axes_style.rst\n\n \"\"\"\n if style is None:\n style_dict = {k: mpl.rcParams[k] for k in _style_keys}\n\n elif isinstance(style, dict):\n style_dict = style\n\n else:\n styles = [\"white\", \"dark\", \"whitegrid\", \"darkgrid\", \"ticks\"]\n if style not in styles:\n raise ValueError(f\"style must be one of {', '.join(styles)}\")\n\n # Define colors here\n dark_gray = \".15\"\n light_gray = \".8\"\n\n # Common parameters\n style_dict = {\n\n \"figure.facecolor\": \"white\",\n \"axes.labelcolor\": dark_gray,\n\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": dark_gray,\n \"ytick.color\": dark_gray,\n\n \"axes.axisbelow\": True,\n \"grid.linestyle\": \"-\",\n\n\n \"text.color\": dark_gray,\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\"Arial\", \"DejaVu Sans\", \"Liberation Sans\",\n \"Bitstream Vera Sans\", \"sans-serif\"],\n\n\n \"lines.solid_capstyle\": \"round\",\n \"patch.edgecolor\": \"w\",\n \"patch.force_edgecolor\": True,\n\n \"image.cmap\": \"rocket\",\n\n \"xtick.top\": False,\n \"ytick.right\": False,\n\n }\n\n # Set grid on or off\n if \"grid\" in style:\n style_dict.update({\n \"axes.grid\": True,\n })\n else:\n style_dict.update({\n \"axes.grid\": False,\n })\n\n # Set the color of the background, spines, and grids\n if style.startswith(\"dark\"):\n style_dict.update({\n\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"grid.color\": \"white\",\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style == \"whitegrid\":\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": light_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style in [\"white\", \"ticks\"]:\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": dark_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n # Show or hide the axes ticks\n if style == \"ticks\":\n style_dict.update({\n \"xtick.bottom\": True,\n \"ytick.left\": True,\n })\n else:\n style_dict.update({\n \"xtick.bottom\": False,\n \"ytick.left\": False,\n })\n\n # Remove entries that are not defined in the base list of valid keys\n # This lets us handle matplotlib <=/> 2.0\n style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _style_keys}\n style_dict.update(rc)\n\n # Wrap in an _AxesStyle object so this can be used in a with statement\n style_object = _AxesStyle(style_dict)\n\n return style_object\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 300, "name": "_AxesStyle", "kind": "ref", "category": "function", "info": " style_object = _AxesStyle(style_dict)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 305, "name": "set_style", "kind": "def", "category": "function", "info": "def set_style(style=None, rc=None):\n \"\"\"\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n 
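Because the style dictionary assembled above is wrapped in _AxesStyle, axes_style() also works as a context manager; a short usage sketch, assuming seaborn and matplotlib are installed:

import matplotlib.pyplot as plt
import seaborn as sns

with sns.axes_style("whitegrid", rc={"grid.linestyle": ":"}):
    fig, ax = plt.subplots()   # created under the temporary style
    ax.plot([0, 1], [0, 1])
# rcParams revert to their previous values once the block exits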
whether a grid is enabled by default. This is accomplished using the\n :ref:`matplotlib rcParams system `.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n See :func:`axes_style` to get the parameter values.\n\n Parameters\n ----------\n style : dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_style.rst\n\n \"\"\"\n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 333, "name": "axes_style", "kind": "ref", "category": "function", "info": " style_object = axes_style(style, rc)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 337, "name": "plotting_context", "kind": "def", "category": "function", "info": "def plotting_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Get the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n :ref:`matplotlib rcParams system `.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_context`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n context : None, dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/plotting_context.rst\n\n \"\"\"\n if context is None:\n context_dict = {k: mpl.rcParams[k] for k in _context_keys}\n\n elif isinstance(context, dict):\n context_dict = context\n\n else:\n\n contexts = [\"paper\", \"notebook\", \"talk\", \"poster\"]\n if context not in contexts:\n raise ValueError(f\"context must be in {', '.join(contexts)}\")\n\n # Set up dictionary of default parameters\n texts_base_context = {\n\n \"font.size\": 12,\n \"axes.labelsize\": 12,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 11,\n \"ytick.labelsize\": 11,\n \"legend.fontsize\": 11,\n \"legend.title_fontsize\": 12,\n\n }\n\n base_context = {\n\n \"axes.linewidth\": 1.25,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.5,\n \"lines.markersize\": 6,\n \"patch.linewidth\": 1,\n\n \"xtick.major.width\": 1.25,\n \"ytick.major.width\": 1.25,\n \"xtick.minor.width\": 1,\n \"ytick.minor.width\": 1,\n\n \"xtick.major.size\": 6,\n \"ytick.major.size\": 6,\n \"xtick.minor.size\": 4,\n \"ytick.minor.size\": 4,\n\n }\n base_context.update(texts_base_context)\n\n # Scale all the parameters by the same factor depending on the context\n scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]\n context_dict = {k: v * scaling for k, v in base_context.items()}\n\n # Now independently scale the fonts\n font_keys = texts_base_context.keys()\n font_dict = {k: context_dict[k] * font_scale for k in font_keys}\n context_dict.update(font_dict)\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _context_keys}\n context_dict.update(rc)\n\n # Wrap in a _PlottingContext object so this can be used in a with statement\n context_object = _PlottingContext(context_dict)\n\n return context_object\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 433, "name": "_PlottingContext", "kind": "ref", "category": "function", "info": " context_object = _PlottingContext(context_dict)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 438, "name": "set_context", "kind": "def", "category": "function", "info": "def set_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Set the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n :ref:`matplotlib rcParams system `.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n See :func:`plotting_context` to get the parameter values.\n\n Parameters\n ----------\n context : dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
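The two-stage scaling in plotting_context() above, reduced to its arithmetic; the base values come from the dictionaries shown, while the chosen context and font_scale are arbitrary examples:

# Every size-like parameter is multiplied by a per-context factor,
# then the font-size keys get an extra independent factor.
base = {"axes.labelsize": 12, "lines.linewidth": 1.5}
font_keys = {"axes.labelsize"}

scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)["talk"]
context = {k: v * scaling for k, v in base.items()}
context.update({k: context[k] * 1.2 for k in font_keys})  # font_scale=1.2
print(context)  # {'axes.labelsize': 21.6, 'lines.linewidth': 2.25}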
include:: ../docstrings/set_context.rst\n\n \"\"\"\n context_object = plotting_context(context, font_scale, rc)\n mpl.rcParams.update(context_object)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 471, "name": "plotting_context", "kind": "ref", "category": "function", "info": " context_object = plotting_context(context, font_scale, rc)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 475, "name": "_RCAesthetics", "kind": "def", "category": "class", "info": "__enter__\t__exit__\t__call__"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 476, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n rc = mpl.rcParams\n self._orig = {k: rc[k] for k in self._keys}\n self._set(self)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 479, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 481, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 482, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self._orig)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 486, "name": "wrapper", "kind": "def", "category": "function", "info": " def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 488, "name": "func", "kind": "ref", "category": "function", "info": " return func(*args, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 492, "name": "_AxesStyle", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 498, "name": "_PlottingContext", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 504, "name": "set_palette", "kind": "def", "category": "function", "info": "def set_palette(palette, n_colors=None, desat=None, color_codes=False):\n \"\"\"Set the matplotlib color cycle using a seaborn palette.\n\n Parameters\n ----------\n palette : seaborn color palette | matplotlib colormap | hls | husl\n Palette definition. 
Should be something that :func:`color_palette`\n can process.\n n_colors : int\n Number of colors in the cycle. The default number of colors will depend\n on the format of ``palette``, see the :func:`color_palette`\n documentation for more information.\n desat : float\n Proportion to desaturate each color by.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n\n Examples\n --------\n >>> set_palette(\"Reds\")\n\n >>> set_palette(\"Set1\", 8, .75)\n\n See Also\n --------\n color_palette : build a color palette or set the color cycle temporarily\n in a ``with`` statement.\n set_context : set parameters to scale plot elements\n set_style : set the default parameters for figure style\n\n \"\"\"\n colors = palettes.color_palette(palette, n_colors, desat)\n cyl = cycler('color', colors)\n mpl.rcParams['axes.prop_cycle'] = cyl\n mpl.rcParams[\"patch.facecolor\"] = colors[0]\n if color_codes:\n try:\n palettes.set_color_codes(palette)\n except (ValueError, TypeError):\n pass\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 536, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = palettes.color_palette(palette, n_colors, desat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 542, "name": "set_color_codes", "kind": "ref", "category": "function", "info": " palettes.set_color_codes(palette)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 24, "name": "_LinearPlotter", "kind": "def", "category": "class", "info": "establish_variables\tdropna\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 31, "name": "establish_variables", "kind": "def", "category": "function", "info": " def establish_variables(self, data, **kws):\n \"\"\"Extract variables from data or use directly.\"\"\"\n self.data = data\n\n # Validate the inputs\n any_strings = any([isinstance(v, str) for v in kws.values()])\n if any_strings and data is None:\n raise ValueError(\"Must pass `data` if using named variables.\")\n\n # Set the variables\n for var, val in kws.items():\n if isinstance(val, str):\n vector = data[val]\n elif isinstance(val, list):\n vector = np.asarray(val)\n else:\n vector = val\n if vector is not None and vector.shape != (1,):\n vector = np.squeeze(vector)\n if np.ndim(vector) > 1:\n err = \"regplot inputs must be 1d\"\n raise ValueError(err)\n setattr(self, var, vector)\n\n def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 55, "name": "dropna", "kind": "def", "category": "function", "info": " def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in 
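The effect of set_palette() above is essentially two rcParams writes; a minimal sketch with placeholder colors, assuming matplotlib and the cycler package are available:

import matplotlib as mpl
from cycler import cycler

colors = [(.2, .4, .7), (.8, .3, .2), (.3, .6, .3)]  # example palette only
mpl.rcParams["axes.prop_cycle"] = cycler("color", colors)  # install the cycle
mpl.rcParams["patch.facecolor"] = colors[0]                # first color for patches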
vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 69, "name": "_RegressionPlotter", "kind": "def", "category": "class", "info": "__init__\tscatter_data\testimate_data\tfit_regression\tfit_fast\tfit_poly\tfit_statsmodels\tfit_lowess\tfit_logx\tbin_predictor\tregress_out\tplot\tscatterplot\tlineplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 106, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(data, x=x, y=y, units=units,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 111, "name": "dropna", "kind": "ref", "category": "function", "info": " self.dropna(\"x\", \"y\", \"units\", \"x_partial\", \"y_partial\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 115, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.x = self.regress_out(self.x, self.x_partial)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 117, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.y = self.regress_out(self.y, self.y_partial)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 122, "name": "bin_predictor", "kind": "ref", "category": "function", "info": " x_discrete, x_bins = self.bin_predictor(x_bins)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 136, "name": "scatter_data", "kind": "def", "category": "function", "info": " def scatter_data(self):\n \"\"\"Data where each observation is a point.\"\"\"\n x_j = self.x_jitter\n if x_j is None:\n x = self.x\n else:\n x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n\n y_j = self.y_jitter\n if y_j is None:\n y = self.y\n else:\n y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n\n return x, y\n\n @property\n def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = 
self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, 
percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 142, "name": "uniform", "kind": "ref", "category": "function", "info": " x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 148, "name": "uniform", "kind": "ref", "category": "function", "info": " y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 153, "name": "estimate_data", "kind": "def", "category": "function", "info": " def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = 
np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n 
kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 163, "name": "x_estimator", "kind": "ref", "category": "function", "info": " est = self.x_estimator(_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 177, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = algo.bootstrap(_y,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 182, "name": "ci", "kind": "ref", "category": "function", "info": " _ci = utils.ci(boots, self.x_ci)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 187, "name": "fit_regression", "kind": "def", "category": "function", "info": " def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is 
None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n 
self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 197, "name": "get_xlim", "kind": "ref", "category": "function", "info": " x_min, x_max = ax.get_xlim()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 203, "name": "fit_poly", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_poly(grid, self.order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 207, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 211, "name": "fit_lowess", "kind": "ref", "category": "function", "info": " grid, yhat = self.fit_lowess()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 214, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 216, "name": "fit_logx", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_logx(grid)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 218, "name": "fit_fast", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_fast(grid)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 224, "name": "ci", "kind": "ref", "category": "function", "info": " err_bands = utils.ci(yhat_boots, ci, axis=0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 228, "name": "fit_fast", "kind": "def", "category": "function", "info": " def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's 
original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 230, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = 
grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n 
ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 231, "name": "pinv", "kind": "ref", "category": "function", "info": " return np.linalg.pinv(_x).dot(_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 235, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = grid.dot(reg_func(X, y))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 239, "name": "bootstrap", "kind": "ref", "category": "function", "info": " beta_boots = algo.bootstrap(X, y,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 247, "name": "fit_poly", "kind": "def", "category": "function", "info": " def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except 
glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n 
else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 249, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = 
np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 253, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = reg_func(x, 
y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 257, "name": "bootstrap", "kind": "ref", "category": "function", "info": " yhat_boots = algo.bootstrap(x, y,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 264, "name": "fit_statsmodels", "kind": "def", "category": "function", "info": " def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n 
ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 270, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n 
\"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, 
**kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 272, "name": "model", "kind": "ref", "category": "function", "info": " yhat = model(_y, _x, **kwargs).fit().predict(grid)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 278, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = reg_func(X, y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 282, "name": "bootstrap", "kind": "ref", "category": "function", "info": " yhat_boots = algo.bootstrap(X, y,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 289, "name": "fit_lowess", "kind": "def", "category": "function", "info": " def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = 
mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 295, "name": "fit_logx", "kind": "def", "category": "function", "info": " def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - 
b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 300, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, 
order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # 
Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 302, "name": "pinv", "kind": "ref", "category": "function", "info": " return np.linalg.pinv(_x).dot(_y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 304, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = grid.dot(reg_func(X, y))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 308, "name": "bootstrap", "kind": "ref", "category": "function", "info": " beta_boots = algo.bootstrap(X, y,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 316, "name": "bin_predictor", "kind": "def", "category": "function", "info": " def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n 
line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 330, "name": "regress_out", "kind": "def", "category": "function", "info": " def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = 
mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 336, "name": "pinv", "kind": "ref", "category": "function", "info": " a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 350, "name": "get_color", "kind": "ref", "category": "function", "info": " color = lines.get_color()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 356, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 356, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 364, "name": "scatterplot", "kind": "ref", "category": "function", "info": " 
self.scatterplot(ax, scatter_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 367, "name": "lineplot", "kind": "ref", "category": "function", "info": " self.lineplot(ax, line_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 371, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.x.name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 373, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.y.name)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 375, "name": "scatterplot", "kind": "def", "category": "function", "info": " def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 407, "name": "lineplot", "kind": "def", "category": "function", "info": " def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, 
facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 410, "name": "fit_regression", "kind": "ref", "category": "function", "info": " grid, yhat, err_bands = self.fit_regression(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 557, "name": "lmplot", "kind": "def", "category": "function", "info": "def lmplot(\n data=None, *,\n x=None, y=None, hue=None, col=None, row=None,\n palette=None, col_wrap=None, height=5, aspect=1, markers=\"o\",\n sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,\n legend=True, legend_out=None, x_estimator=None, x_bins=None,\n x_ci=\"ci\", scatter=True, fit_reg=True, ci=95, n_boot=1000,\n units=None, seed=None, order=1, logistic=False, lowess=False,\n robust=False, logx=False, x_partial=None, y_partial=None,\n truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,\n line_kws=None, facet_kws=None, size=None,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 580, "name": "facet_kw_deprecation", "kind": "def", "category": "function", "info": " def facet_kw_deprecation(key, val):\n msg = (\n f\"{key} is deprecated from the `lmplot` function signature. \"\n \"Please update your code to pass it using `facet_kws`.\"\n )\n if val is not None:\n warnings.warn(msg, UserWarning)\n facet_kws[key] = val\n\n facet_kw_deprecation(\"sharex\", sharex)\n facet_kw_deprecation(\"sharey\", sharey)\n facet_kw_deprecation(\"legend_out\", legend_out)\n\n if data is None:\n raise TypeError(\"Missing required keyword argument `data`.\")\n\n # Reduce the dataframe to only needed columns\n need_cols = [x, y, hue, col, row, units, x_partial, y_partial]\n cols = np.unique([a for a in need_cols if a is not None]).tolist()\n data = data[cols]\n\n # Initialize the grid\n facets = FacetGrid(\n data, row=row, col=col, hue=hue,\n palette=palette,\n row_order=row_order, col_order=col_order, hue_order=hue_order,\n height=height, aspect=aspect, col_wrap=col_wrap,\n **facet_kws,\n )\n\n # Add the markers here as FacetGrid has figured out how many levels of the\n # hue variable are needed and we don't want to duplicate that process\n if facets.hue_names is None:\n n_markers = 1\n else:\n n_markers = len(facets.hue_names)\n if not isinstance(markers, list):\n markers = [markers] * n_markers\n if len(markers) != n_markers:\n raise ValueError(\"markers must be a singleton or a list of markers \"\n \"for each level of the hue variable\")\n facets.hue_kws = {\"marker\": markers}\n\n def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in 
[col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 589, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"sharex\", sharex)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 590, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"sharey\", sharey)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 591, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"legend_out\", legend_out)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 598, "name": "tolist", "kind": "ref", "category": "function", "info": " cols = np.unique([a for a in need_cols if a is not None]).tolist()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 602, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " facets = FacetGrid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 623, "name": "update_datalim", "kind": "def", "category": "function", "info": " def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in [col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 624, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 624, "name": "astype", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 625, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xys, updatey=False)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 626, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scaley=False)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 628, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(update_datalim, x=x, y=y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 639, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 640, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " facets.set_axis_labels(x, y)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 644, "name": "add_legend", "kind": "ref", "category": "function", "info": " facets.add_legend()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 831, "name": "regplot", "kind": "def", "category": "function", "info": "def regplot(\n data=None, *, x=None, y=None,\n x_estimator=None, x_bins=None, x_ci=\"ci\",\n scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,\n seed=None, order=1, logistic=False, lowess=False, robust=False,\n logx=False, x_partial=None, y_partial=None,\n truncate=True, dropna=True, x_jitter=None, y_jitter=None,\n label=None, color=None, marker=\"o\",\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 842, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 1027, "name": "residplot", "kind": "def", "category": "function", "info": "def residplot(\n data=None, *, x=None, y=None,\n x_partial=None, y_partial=None, lowess=False,\n order=1, robust=False, dropna=True, label=None, color=None,\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 1083, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, ci=None,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 1092, "name": "fit_regression", "kind": "ref", "category": "function", "info": " _, yhat, _ = plotter.fit_regression(grid=plotter.x)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 24, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": "_relational_narrative = DocstringComponents(dict(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 176, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 178, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 179, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " rel=DocstringComponents(_relational_docs),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 180, "name": "from_function_params", "kind": "ref", "category": "function", "info": " stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 184, "name": "_RelationalPlotter", "kind": "def", "category": "class", "info": "add_legend_data"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 193, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax):\n \"\"\"Add labeled artists to represent the different plot semantics.\"\"\"\n verbosity = self.legend\n if isinstance(verbosity, str) and verbosity not in [\"auto\", \"brief\", \"full\"]:\n err = \"`legend` must be 'auto', 'brief', 'full', or a boolean.\"\n raise ValueError(err)\n elif verbosity is True:\n verbosity = \"auto\"\n\n legend_kwargs = {}\n keys = []\n\n # Assign a legend title if there is only going to be one sub-legend,\n # otherwise, subtitles will be inserted into the texts list with an\n # invisible handle (which is a hack)\n titles = {\n title for title in\n (self.variables.get(v, None) for v in [\"hue\", \"size\", \"style\"])\n if title is not None\n }\n if len(titles) == 1:\n legend_title = titles.pop()\n else:\n legend_title = \"\"\n\n title_kws = dict(\n visible=False, color=\"w\", s=0, linewidth=0, marker=\"\", dashes=\"\"\n )\n\n def update(var_name, val_name, **kws):\n\n key = var_name, val_name\n if key in legend_kwargs:\n legend_kwargs[key].update(**kws)\n else:\n keys.append(key)\n\n legend_kwargs[key] = dict(**kws)\n\n # Define the maximum number of ticks to use for \"brief\" legends\n brief_ticks = 6\n\n # -- Add a legend for hue semantics\n brief_hue = self._hue_map.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(self._hue_map.levels) > brief_ticks)\n )\n if brief_hue:\n if isinstance(self._hue_map.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n limits = min(self._hue_map.levels), max(self._hue_map.levels)\n hue_levels, hue_formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[\"hue\"].infer_objects().dtype\n )\n elif self._hue_map.levels is None:\n hue_levels = hue_formatted_levels = []\n else:\n hue_levels = hue_formatted_levels = self._hue_map.levels\n\n # Add the hue semantic subtitle\n if not legend_title and self.variables.get(\"hue\", None) is not None:\n update((self.variables[\"hue\"], \"title\"),\n self.variables[\"hue\"], **title_kws)\n\n # Add the hue semantic labels\n for level, formatted_level in zip(hue_levels, hue_formatted_levels):\n if level is not None:\n color = 
self._hue_map(level)\n update(self.variables[\"hue\"], formatted_level, color=color)\n\n # -- Add a legend for size semantics\n brief_size = self._size_map.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(self._size_map.levels) > brief_ticks)\n )\n if brief_size:\n # Define how ticks will interpolate between the min/max data values\n if isinstance(self._size_map.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n # Define the min/max data values\n limits = min(self._size_map.levels), max(self._size_map.levels)\n size_levels, size_formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[\"size\"].infer_objects().dtype\n )\n elif self._size_map.levels is None:\n size_levels = size_formatted_levels = []\n else:\n size_levels = size_formatted_levels = self._size_map.levels\n\n # Add the size semantic subtitle\n if not legend_title and self.variables.get(\"size\", None) is not None:\n update((self.variables[\"size\"], \"title\"),\n self.variables[\"size\"], **title_kws)\n\n # Add the size semantic labels\n for level, formatted_level in zip(size_levels, size_formatted_levels):\n if level is not None:\n size = self._size_map(level)\n update(\n self.variables[\"size\"],\n formatted_level,\n linewidth=size,\n s=size,\n )\n\n # -- Add a legend for style semantics\n\n # Add the style semantic title\n if not legend_title and self.variables.get(\"style\", None) is not None:\n update((self.variables[\"style\"], \"title\"),\n self.variables[\"style\"], **title_kws)\n\n # Add the style semantic labels\n if self._style_map.levels is not None:\n for level in self._style_map.levels:\n if level is not None:\n attrs = self._style_map(level)\n update(\n self.variables[\"style\"],\n level,\n marker=attrs.get(\"marker\", \"\"),\n dashes=attrs.get(\"dashes\", \"\"),\n )\n\n func = getattr(ax, self._legend_func)\n\n legend_data = {}\n legend_order = []\n\n for key in keys:\n\n _, label = key\n kws = legend_kwargs[key]\n kws.setdefault(\"color\", \".2\")\n use_kws = {}\n for attr in self._legend_attributes + [\"visible\"]:\n if attr in kws:\n use_kws[attr] = kws[attr]\n artist = func([], [], label=label, **use_kws)\n if self._legend_func == \"plot\":\n artist = artist[0]\n legend_data[key] = artist\n legend_order.append(key)\n\n self.legend_title = legend_title\n self.legend_data = legend_data\n self.legend_order = legend_order\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 246, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", "info": " hue_levels, hue_formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 247, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, self.plot_data[\"hue\"].infer_objects().dtype\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 262, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 278, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", 
"info": " size_levels, size_formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 279, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, self.plot_data[\"size\"].infer_objects().dtype\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 294, "name": "_size_map", "kind": "ref", "category": "function", "info": " size = self._size_map(level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 313, "name": "_style_map", "kind": "ref", "category": "function", "info": " attrs = self._style_map(level)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 335, "name": "func", "kind": "ref", "category": "function", "info": " artist = func([], [], label=label, **use_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 346, "name": "_LinePlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 405, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " agg = EstimateAggregator(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 421, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 426, "name": "sort_values", "kind": "ref", "category": "function", "info": " sub_data = sub_data.sort_values(sort_cols)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 433, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped = sub_data.groupby(grouper, sort=self.sort)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 436, "name": "apply", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, agg_var).reset_index()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 436, "name": "reset_index", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, agg_var).reset_index()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 440, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 448, "name": "groupby", "kind": "ref", "category": "function", "info": " for _, unit_data in sub_data.groupby(\"units\"):\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 456, "name": "set_color", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 456, "name": "_hue_map", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 459, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 459, "name": "_size_map", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 462, "name": "_style_map", "kind": "ref", "category": "function", "info": " attributes = self._style_map(sub_vars[\"style\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 464, "name": "set_dashes", "kind": "ref", "category": "function", "info": " line.set_dashes(attributes[\"dashes\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 466, "name": "set_marker", "kind": "ref", "category": "function", "info": " line.set_marker(attributes[\"marker\"])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 468, "name": "get_color", "kind": "ref", "category": "function", "info": " line_color = line.get_color()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 469, "name": "get_alpha", "kind": "ref", "category": "function", "info": " line_alpha = line.get_alpha()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 470, "name": "get_solid_capstyle", "kind": "ref", "category": "function", "info": " line_capstyle = line.get_solid_capstyle()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 498, "name": "get_children", "kind": "ref", "category": "function", "info": " for obj in ebars.get_children():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 500, "name": "set_capstyle", "kind": "ref", "category": "function", "info": " obj.set_capstyle(line_capstyle)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 503, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 505, "name": "add_legend_data", "kind": "ref", 
"category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 506, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 509, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 512, "name": "_ScatterPlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 542, "name": "dropna", "kind": "ref", "category": "function", "info": " data = self.plot_data.dropna()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 556, "name": "_style_map", "kind": "ref", "category": "function", "info": " example_marker = self._style_map(example_level, \"marker\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 564, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 565, "name": "is_filled", "kind": "ref", "category": "function", "info": " if m.is_filled():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 578, "name": "set_facecolors", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 578, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 581, "name": "set_sizes", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 581, "name": "_size_map", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 584, "name": "_style_map", "kind": "ref", "category": "function", "info": " p = [self._style_map(val, \"path\") for val in data[\"style\"]]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 585, "name": "set_paths", "kind": "ref", "category": "function", "info": " points.set_paths(p)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": 
"seaborn/relational.py", "line": 590, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 591, "name": "set_linewidths", "kind": "ref", "category": "function", "info": " points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 594, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 596, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 597, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 600, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 603, "name": "lineplot", "kind": "def", "category": "function", "info": "def lineplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n dashes=True, markers=None, style_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, seed=None,\n sort=True, err_style=\"band\", err_kws=None, ci=\"deprecated\",\n legend=\"auto\", ax=None, **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 615, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = _deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 617, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = _LinePlotter.get_semantics(locals())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 618, "name": "_LinePlotter", "kind": "ref", "category": "function", "info": " p = _LinePlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 625, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 626, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 627, "name": 
"map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, dashes=dashes, order=style_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 638, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 643, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 729, "name": "scatterplot", "kind": "def", "category": "function", "info": "def scatterplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=True, style_order=None,\n x_bins=None, y_bins=None,\n units=None, estimator=None, ci=95, n_boot=1000,\n alpha=None, x_jitter=None, y_jitter=None,\n legend=\"auto\", ax=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 742, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = _ScatterPlotter.get_semantics(locals())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 743, "name": "_ScatterPlotter", "kind": "ref", "category": "function", "info": " p = _ScatterPlotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 750, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 751, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 752, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, order=style_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 760, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 765, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 845, "name": "relplot", "kind": "def", "category": "function", "info": "def relplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n row=None, col=None, col_wrap=None, row_order=None, col_order=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n 
markers=None, dashes=None, style_order=None,\n legend=\"auto\", kind=\"scatter\", height=5, aspect=1, facet_kws=None,\n **kwargs\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 882, "name": "plotter", "kind": "ref", "category": "function", "info": " p = plotter(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 884, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=plotter.get_semantics(locals()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 887, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 888, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 889, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, dashes=dashes, order=style_order)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 907, "name": "_style_map", "kind": "ref", "category": "function", "info": " markers = {k: p._style_map(k, \"marker\") for k in style_order}\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 911, "name": "_style_map", "kind": "ref", "category": "function", "info": " dashes = {k: p._style_map(k, \"dashes\") for k in style_order}\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 936, "name": "assign_variables", "kind": "ref", "category": "function", "info": " p.assign_variables(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 962, "name": "rename", "kind": "ref", "category": "function", "info": " full_data = p.plot_data.rename(columns=new_cols)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 966, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 967, "name": "dropna", "kind": "ref", "category": "function", "info": " data=full_data.dropna(axis=1, how=\"all\"),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 975, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(func, **plot_kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 985, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " p.add_legend_data(g.axes.flat[0])\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 987, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(legend_data=p.legend_data,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 997, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = g.data.rename(columns=orig_cols)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 22, "name": "ci_to_errsize", "kind": "def", "category": "function", "info": "def ci_to_errsize(cis, heights):\n \"\"\"Convert intervals to error arguments relative to plot heights.\n\n Parameters\n ----------\n cis : 2 x n sequence\n sequence of confidence interval limits\n heights : n sequence\n sequence of plot heights\n\n Returns\n -------\n errsize : 2 x n array\n sequence of error size relative to height values in correct\n format as argument for plt.bar\n\n \"\"\"\n cis = np.atleast_2d(cis).reshape(2, -1)\n heights = np.atleast_1d(heights)\n errsize = []\n for i, (low, high) in enumerate(np.transpose(cis)):\n h = heights[i]\n elow = h - low\n ehigh = high - h\n errsize.append([elow, ehigh])\n\n errsize = np.asarray(errsize).T\n return errsize\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 52, "name": "_normal_quantile_func", "kind": "def", "category": "function", "info": "def _normal_quantile_func(q):\n \"\"\"\n Compute the quantile function of the standard normal distribution.\n\n This wrapper exists because we are dropping scipy as a mandatory dependency\n but statistics.NormalDist was added to the standard library in 3.8.\n\n \"\"\"\n try:\n from statistics import NormalDist\n qf = np.vectorize(NormalDist().inv_cdf)\n except ImportError:\n try:\n from scipy.stats import norm\n qf = norm.ppf\n except ImportError:\n msg = (\n \"Standard normal quantile functions require either Python>=3.8 or scipy\"\n )\n raise RuntimeError(msg)\n return qf(q)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 72, "name": "qf", "kind": "ref", "category": "function", "info": " return qf(q)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 75, "name": "_draw_figure", "kind": "def", "category": "function", "info": "def _draw_figure(fig):\n \"\"\"Force draw of a matplotlib figure, accounting for back-compat.\"\"\"\n # See https://github.com/matplotlib/matplotlib/issues/19197 for context\n fig.canvas.draw()\n if fig.stale:\n try:\n fig.draw(fig.canvas.get_renderer())\n except AttributeError:\n pass\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 81, "name": "get_renderer", "kind": "ref", "category": "function", "info": " fig.draw(fig.canvas.get_renderer())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 86, "name": "_default_color", "kind": "def", "category": "function", "info": "def _default_color(method, hue, color, kws):\n \"\"\"If needed, get a default color by using the matplotlib property cycle.\"\"\"\n if hue is not None:\n # This warning is probably 
user-friendly, but it's currently triggered\n # in a FacetGrid context and I don't want to mess with that logic right now\n # if color is not None:\n # msg = \"`color` is ignored when `hue` is assigned.\"\n # warnings.warn(msg)\n return None\n\n if color is not None:\n return color\n\n elif method.__name__ == \"plot\":\n\n scout, = method([], [], **kws)\n color = scout.get_color()\n scout.remove()\n\n elif method.__name__ == \"scatter\":\n\n # Matplotlib will raise if the size of x/y don't match s/c,\n # and the latter might be in the kws dict\n scout_size = max(\n np.atleast_1d(kws.get(key, [])).shape[0]\n for key in [\"s\", \"c\", \"fc\", \"facecolor\", \"facecolors\"]\n )\n scout_x = scout_y = np.full(scout_size, np.nan)\n\n scout = method(scout_x, scout_y, **kws)\n facecolors = scout.get_facecolors()\n\n if not len(facecolors):\n # Handle bug in matplotlib <= 3.2 (I think)\n # This will limit the ability to use non color= kwargs to specify\n # a color in versions of matplotlib with the bug, but trying to\n # work out what the user wanted by re-implementing the broken logic\n # of inspecting the kwargs is probably too brittle.\n single_color = False\n else:\n single_color = np.unique(facecolors, axis=0).shape[0] == 1\n\n # Allow the user to specify an array of colors through various kwargs\n if \"c\" not in kws and single_color:\n color = to_rgb(facecolors[0])\n\n scout.remove()\n\n elif method.__name__ == \"bar\":\n\n # bar() needs masked, not empty data, to generate a patch\n scout, = method([np.nan], [np.nan], **kws)\n color = to_rgb(scout.get_facecolor())\n scout.remove()\n\n elif method.__name__ == \"fill_between\":\n\n # There is a bug on matplotlib < 3.3 where fill_between with\n # datetime units and empty data will set incorrect autoscale limits\n # To workaround it, we'll always return the first color in the cycle.\n # https://github.com/matplotlib/matplotlib/issues/17586\n ax = method.__self__\n datetime_axis = any([\n isinstance(ax.xaxis.converter, mpl.dates.DateConverter),\n isinstance(ax.yaxis.converter, mpl.dates.DateConverter),\n ])\n if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n return \"C0\"\n\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n\n scout = method([], [], **kws)\n facecolor = scout.get_facecolor()\n color = to_rgb(facecolor[0])\n scout.remove()\n\n return color\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 101, "name": "method", "kind": "ref", "category": "function", "info": " scout, = method([], [], **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 102, "name": "get_color", "kind": "ref", "category": "function", "info": " color = scout.get_color()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 115, "name": "method", "kind": "ref", "category": "function", "info": " scout = method(scout_x, scout_y, **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 116, "name": "get_facecolors", "kind": "ref", "category": "function", "info": " facecolors = scout.get_facecolors()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 137, "name": "method", "kind": "ref", "category": "function", "info": " scout, = 
method([np.nan], [np.nan], **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 138, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " color = to_rgb(scout.get_facecolor())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 152, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 152, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 155, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 157, "name": "method", "kind": "ref", "category": "function", "info": " scout = method([], [], **kws)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 158, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " facecolor = scout.get_facecolor()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 165, "name": "desaturate", "kind": "def", "category": "function", "info": "def desaturate(color, prop):\n \"\"\"Decrease the saturation channel of a color by some percent.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n prop : float\n saturation channel of color will be multiplied by this value\n\n Returns\n -------\n new_color : rgb tuple\n desaturated color code in RGB tuple representation\n\n \"\"\"\n # Check inputs\n if not 0 <= prop <= 1:\n raise ValueError(\"prop must be between 0 and 1\")\n\n # Get rgb tuple rep\n rgb = to_rgb(color)\n\n # Convert to hls\n h, l, s = colorsys.rgb_to_hls(*rgb)\n\n # Desaturate the saturation channel\n s *= prop\n\n # Convert back to rgb\n new_color = colorsys.hls_to_rgb(h, l, s)\n\n return new_color\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 200, "name": "saturate", "kind": "def", "category": "function", "info": "def saturate(color):\n \"\"\"Return a fully saturated color with the same hue.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n\n Returns\n -------\n new_color : rgb tuple\n saturated color code in RGB tuple representation\n\n \"\"\"\n return set_hls_values(color, s=1)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 214, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " return set_hls_values(color, s=1)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 217, "name": "set_hls_values", "kind": "def", "category": "function", "info": "def set_hls_values(color, h=None, l=None, s=None): # noqa\n 
\"\"\"Independently manipulate the h, l, or s channels of a color.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n h, l, s : floats between 0 and 1, or None\n new values for each channel in hls space\n\n Returns\n -------\n new_color : rgb tuple\n new color code in RGB tuple representation\n\n \"\"\"\n # Get an RGB tuple representation\n rgb = to_rgb(color)\n vals = list(colorsys.rgb_to_hls(*rgb))\n for i, val in enumerate([h, l, s]):\n if val is not None:\n vals[i] = val\n\n rgb = colorsys.hls_to_rgb(*vals)\n return rgb\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 244, "name": "axlabel", "kind": "def", "category": "function", "info": "def axlabel(xlabel, ylabel, **kwargs):\n \"\"\"Grab current axis and label it.\n\n DEPRECATED: will be removed in a future version.\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg, FutureWarning)\n ax = plt.gca()\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 253, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 254, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 257, "name": "remove_na", "kind": "def", "category": "function", "info": "def remove_na(vector):\n \"\"\"Helper method for removing null values from data vectors.\n\n Parameters\n ----------\n vector : vector object\n Must implement boolean masking with [] subscript syntax.\n\n Returns\n -------\n clean_clean : same type as ``vector``\n Vector of data with null values removed. 
May be a copy or a view.\n\n \"\"\"\n return vector[pd.notnull(vector)]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 274, "name": "get_color_cycle", "kind": "def", "category": "function", "info": "def get_color_cycle():\n \"\"\"Return the list of colors in the current matplotlib color cycle\n\n Parameters\n ----------\n None\n\n Returns\n -------\n colors : list\n List of matplotlib colors in the current cycle, or dark gray if\n the current color cycle is empty.\n \"\"\"\n cycler = mpl.rcParams['axes.prop_cycle']\n return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 288, "name": "by_key", "kind": "ref", "category": "function", "info": " return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 291, "name": "despine", "kind": "def", "category": "function", "info": "def despine(fig=None, ax=None, top=True, right=True, left=False,\n bottom=False, offset=None, trim=False):\n \"\"\"Remove the top and right spines from plot(s).\n\n fig : matplotlib figure, optional\n Figure to despine all axes of, defaults to the current figure.\n ax : matplotlib axes, optional\n Specific axes object to despine. Ignored if fig is provided.\n top, right, left, bottom : boolean, optional\n If True, remove that spine.\n offset : int or dict, optional\n Absolute distance, in points, spines should be moved away\n from the axes (negative values move spines inward). A single value\n applies to all spines; a dict can be used to set offset values per\n side.\n trim : bool, optional\n If True, limit spines to the smallest and largest major tick\n on each non-despined axis.\n\n Returns\n -------\n None\n\n \"\"\"\n # Get references to the axes we want\n if fig is None and ax is None:\n axes = plt.gcf().axes\n elif fig is not None:\n axes = fig.axes\n elif ax is not None:\n axes = [ax]\n\n for ax_i in axes:\n for side in [\"top\", \"right\", \"left\", \"bottom\"]:\n # Toggle the spine objects\n is_visible = not locals()[side]\n ax_i.spines[side].set_visible(is_visible)\n if offset is not None and is_visible:\n try:\n val = offset.get(side, 0)\n except AttributeError:\n val = offset\n ax_i.spines[side].set_position(('outward', val))\n\n # Potentially move the ticks\n if left and not right:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.minorTicks\n )\n ax_i.yaxis.set_ticks_position(\"right\")\n for t in ax_i.yaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.yaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if bottom and not top:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.minorTicks\n )\n ax_i.xaxis.set_ticks_position(\"top\")\n for t in ax_i.xaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.xaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if trim:\n # clip off the parts of the spines that extend past major ticks\n xticks = np.asarray(ax_i.get_xticks())\n if xticks.size:\n firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n xticks)[0]\n lasttick = np.compress(xticks <= 
max(ax_i.get_xlim()),\n xticks)[-1]\n ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n ax_i.spines['top'].set_bounds(firsttick, lasttick)\n newticks = xticks.compress(xticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_xticks(newticks)\n\n yticks = np.asarray(ax_i.get_yticks())\n if yticks.size:\n firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n yticks)[0]\n lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n yticks)[-1]\n ax_i.spines['left'].set_bounds(firsttick, lasttick)\n ax_i.spines['right'].set_bounds(firsttick, lasttick)\n newticks = yticks.compress(yticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_yticks(newticks)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 327, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax_i.spines[side].set_visible(is_visible)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 333, "name": "set_position", "kind": "ref", "category": "function", "info": " ax_i.spines[side].set_position(('outward', val))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 338, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 342, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 345, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax_i.yaxis.set_ticks_position(\"right\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 347, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(maj_on)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 349, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(min_on)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 353, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 357, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 360, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax_i.xaxis.set_ticks_position(\"top\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 362, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(maj_on)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 364, "name": 
"set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(min_on)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 368, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = np.asarray(ax_i.get_xticks())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 370, "name": "get_xlim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 372, "name": "get_xlim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(xticks <= max(ax_i.get_xlim()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 374, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 375, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['top'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 378, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax_i.set_xticks(newticks)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 380, "name": "get_yticks", "kind": "ref", "category": "function", "info": " yticks = np.asarray(ax_i.get_yticks())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 382, "name": "get_ylim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 384, "name": "get_ylim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 386, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['left'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 387, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['right'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 390, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax_i.set_yticks(newticks)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 393, "name": "move_legend", "kind": "def", "category": "function", "info": "def move_legend(obj, loc, **kwargs):\n \"\"\"\n Recreate a plot's legend at a new location.\n\n The name is a slight misnomer. 
Matplotlib legends do not expose public\n control over their position parameters. So this function creates a new legend,\n copying over the data from the original object, which is then removed.\n\n Parameters\n ----------\n obj : the object with the plot\n This argument can be either a seaborn or matplotlib object:\n\n - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`\n - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`\n\n loc : str or int\n Location argument, as in :meth:`matplotlib.axes.Axes.legend`.\n\n kwargs\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.\n\n Examples\n --------\n\n .. include:: ../docstrings/move_legend.rst\n\n \"\"\"\n # This is a somewhat hackish solution that will hopefully be obviated by\n # upstream improvements to matplotlib legends that make them easier to\n # modify after creation.\n\n from seaborn.axisgrid import Grid # Avoid circular import\n\n # Locate the legend object and a method to recreate the legend\n if isinstance(obj, Grid):\n old_legend = obj.legend\n legend_func = obj.figure.legend\n elif isinstance(obj, mpl.axes.Axes):\n old_legend = obj.legend_\n legend_func = obj.legend\n elif isinstance(obj, mpl.figure.Figure):\n if obj.legends:\n old_legend = obj.legends[-1]\n else:\n old_legend = None\n legend_func = obj.legend\n else:\n err = \"`obj` must be a seaborn Grid or matplotlib Axes or Figure instance.\"\n raise TypeError(err)\n\n if old_legend is None:\n err = f\"{obj} has no legend attached.\"\n raise ValueError(err)\n\n # Extract the components of the legend we need to reuse\n handles = old_legend.legendHandles\n labels = [t.get_text() for t in old_legend.get_texts()]\n\n # Extract legend properties that can be passed to the recreation method\n # (Vexingly, these don't all round-trip)\n legend_kws = inspect.signature(mpl.legend.Legend).parameters\n props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n\n # Delegate default bbox_to_anchor rules to matplotlib\n props.pop(\"bbox_to_anchor\")\n\n # Try to propagate the existing title and font properties; respect new ones too\n title = props.pop(\"title\")\n if \"title\" in kwargs:\n title.set_text(kwargs.pop(\"title\"))\n title_kwargs = {k: v for k, v in kwargs.items() if k.startswith(\"title_\")}\n for key, val in title_kwargs.items():\n title.set(**{key[6:]: val})\n kwargs.pop(key)\n\n # Try to respect the frame visibility\n kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n\n # Remove the old legend and create the new one\n props.update(kwargs)\n old_legend.remove()\n new_legend = legend_func(handles, labels, loc=loc, **props)\n new_legend.set_title(title.get_text(), title.get_fontproperties())\n\n # Let the Grid object continue to track the correct legend object\n if isinstance(obj, Grid):\n obj._legend = new_legend\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 450, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 450, "name": "get_texts", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 455, "name": "properties", 
"kind": "ref", "category": "function", "info": " props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 463, "name": "set_text", "kind": "ref", "category": "function", "info": " title.set_text(kwargs.pop(\"title\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 470, "name": "get_visible", "kind": "ref", "category": "function", "info": " kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 475, "name": "legend_func", "kind": "ref", "category": "function", "info": " new_legend = legend_func(handles, labels, loc=loc, **props)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 476, "name": "set_title", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 476, "name": "get_text", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 476, "name": "get_fontproperties", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 483, "name": "_kde_support", "kind": "def", "category": "function", "info": "def _kde_support(data, bw, gridsize, cut, clip):\n \"\"\"Establish support for a kernel density estimate.\"\"\"\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n support = np.linspace(support_min, support_max, gridsize)\n\n return support\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 492, "name": "ci", "kind": "def", "category": "function", "info": "def ci(a, which=95, axis=None):\n \"\"\"Return a percentile range from an array of values.\"\"\"\n p = 50 - which / 2, 50 + which / 2\n return np.nanpercentile(a, p, axis)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 498, "name": "get_dataset_names", "kind": "def", "category": "function", "info": "def get_dataset_names():\n \"\"\"Report available example datasets, useful for reporting issues.\n\n Requires an internet connection.\n\n \"\"\"\n url = \"https://github.com/mwaskom/seaborn-data\"\n with urlopen(url) as resp:\n html = resp.read()\n\n pat = r\"/mwaskom/seaborn-data/blob/master/(\\w*).csv\"\n datasets = re.findall(pat, html.decode())\n return datasets\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 509, "name": "decode", "kind": "ref", "category": "function", "info": " datasets = re.findall(pat, html.decode())\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", 
"rel_fname": "seaborn/utils.py", "line": 513, "name": "get_data_home", "kind": "def", "category": "function", "info": "def get_data_home(data_home=None):\n \"\"\"Return a path to the cache directory for example datasets.\n\n This directory is used by :func:`load_dataset`.\n\n If the ``data_home`` argument is not provided, it will use a directory\n specified by the `SEABORN_DATA` environment variable (if it exists)\n or otherwise default to an OS-appropriate user cache location.\n\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n data_home = os.path.expanduser(data_home)\n if not os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 524, "name": "user_cache_dir", "kind": "ref", "category": "function", "info": " data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 525, "name": "expanduser", "kind": "ref", "category": "function", "info": " data_home = os.path.expanduser(data_home)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 526, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(data_home):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 531, "name": "load_dataset", "kind": "def", "category": "function", "info": "def load_dataset(name, cache=True, data_home=None, **kws):\n \"\"\"Load an example dataset from the online repository (requires internet).\n\n This function provides quick access to a small number of example datasets\n that are useful for documenting seaborn or generating reproducible examples\n for bug reports. It is not necessary for normal usage.\n\n Note that some of the datasets have a small amount of preprocessing applied\n to define a proper ordering for categorical variables.\n\n Use :func:`get_dataset_names` to see a list of available datasets.\n\n Parameters\n ----------\n name : str\n Name of the dataset (``{name}.csv`` on\n https://github.com/mwaskom/seaborn-data).\n cache : boolean, optional\n If True, try to load from the local cache first, and save to the cache\n if a download is required.\n data_home : string, optional\n The directory in which to cache data; see :func:`get_data_home`.\n kws : keys and values, optional\n Additional keyword arguments are passed to passed through to\n :func:`pandas.read_csv`.\n\n Returns\n -------\n df : :class:`pandas.DataFrame`\n Tabular data, possibly with some preprocessing applied.\n\n \"\"\"\n # A common beginner mistake is to assume that one's personal data needs\n # to be passed through this function to be usable with seaborn.\n # Let's provide a more helpful error than you would otherwise get.\n if isinstance(name, pd.DataFrame):\n err = (\n \"This function accepts only strings (the name of an example dataset). \"\n \"You passed a pandas DataFrame. 
If you have your own dataset, \"\n \"it is not necessary to use this function before plotting.\"\n )\n raise TypeError(err)\n\n url = f\"https://raw.githubusercontent.com/mwaskom/seaborn-data/master/{name}.csv\"\n\n if cache:\n cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n if not os.path.exists(cache_path):\n if name not in get_dataset_names():\n raise ValueError(f\"'{name}' is not one of the example datasets.\")\n urlretrieve(url, cache_path)\n full_path = cache_path\n else:\n full_path = url\n\n df = pd.read_csv(full_path, **kws)\n\n if df.iloc[-1].isnull().all():\n df = df.iloc[:-1]\n\n # Set some columns as a categorical type with ordered levels\n\n if name == \"tips\":\n df[\"day\"] = pd.Categorical(df[\"day\"], [\"Thur\", \"Fri\", \"Sat\", \"Sun\"])\n df[\"sex\"] = pd.Categorical(df[\"sex\"], [\"Male\", \"Female\"])\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"Lunch\", \"Dinner\"])\n df[\"smoker\"] = pd.Categorical(df[\"smoker\"], [\"Yes\", \"No\"])\n\n if name == \"flights\":\n months = df[\"month\"].str[:3]\n df[\"month\"] = pd.Categorical(months, months.unique())\n\n if name == \"exercise\":\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"1 min\", \"15 min\", \"30 min\"])\n df[\"kind\"] = pd.Categorical(df[\"kind\"], [\"rest\", \"walking\", \"running\"])\n df[\"diet\"] = pd.Categorical(df[\"diet\"], [\"no fat\", \"low fat\"])\n\n if name == \"titanic\":\n df[\"class\"] = pd.Categorical(df[\"class\"], [\"First\", \"Second\", \"Third\"])\n df[\"deck\"] = pd.Categorical(df[\"deck\"], list(\"ABCDEFG\"))\n\n if name == \"penguins\":\n df[\"sex\"] = df[\"sex\"].str.title()\n\n if name == \"diamonds\":\n df[\"color\"] = pd.Categorical(\n df[\"color\"], [\"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n )\n df[\"clarity\"] = pd.Categorical(\n df[\"clarity\"], [\"IF\", \"VVS1\", \"VVS2\", \"VS1\", \"VS2\", \"SI1\", \"SI2\", \"I1\"],\n )\n df[\"cut\"] = pd.Categorical(\n df[\"cut\"], [\"Ideal\", \"Premium\", \"Very Good\", \"Good\", \"Fair\"],\n )\n\n elif name == \"taxis\":\n df[\"pickup\"] = pd.to_datetime(df[\"pickup\"])\n df[\"dropoff\"] = pd.to_datetime(df[\"dropoff\"])\n\n return df\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 577, "name": "get_data_home", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 577, "name": "basename", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 578, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(cache_path):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 579, "name": "get_dataset_names", "kind": "ref", "category": "function", "info": " if name not in get_dataset_names():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 633, "name": "axis_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axis_ticklabels_overlap(labels):\n \"\"\"Return a boolean for whether the list of ticklabels have 
overlaps.\n\n Parameters\n ----------\n labels : list of matplotlib ticklabels\n\n Returns\n -------\n overlap : boolean\n True if any of the labels overlap.\n\n \"\"\"\n if not labels:\n return False\n try:\n bboxes = [l.get_window_extent() for l in labels]\n overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n return max(overlaps) > 1\n except RuntimeError:\n # Issue on macos backend raises an error in the above code\n return False\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 649, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " bboxes = [l.get_window_extent() for l in labels]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 650, "name": "count_overlaps", "kind": "ref", "category": "function", "info": " overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 657, "name": "axes_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axes_ticklabels_overlap(ax):\n \"\"\"Return booleans for whether the x and y ticklabels on an Axes overlap.\n\n Parameters\n ----------\n ax : matplotlib Axes\n\n Returns\n -------\n x_overlap, y_overlap : booleans\n True when the labels on that axis overlap.\n\n \"\"\"\n return (axis_ticklabels_overlap(ax.get_xticklabels()),\n axis_ticklabels_overlap(ax.get_yticklabels()))\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 670, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 670, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 671, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 671, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 674, "name": "locator_to_legend_entries", "kind": "def", "category": "function", "info": "def locator_to_legend_entries(locator, limits, dtype):\n \"\"\"Return levels and formatted levels for brief numeric legends.\"\"\"\n raw_levels = locator.tick_values(*limits).astype(dtype)\n\n # The locator can return ticks outside the limits, clip them here\n raw_levels = [l for l in raw_levels if l >= limits[0] and l <= limits[1]]\n\n class dummy_axis:\n def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n else:\n formatter = mpl.ticker.ScalarFormatter()\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # 
formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 676, "name": "tick_values", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 676, "name": "astype", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 681, "name": "dummy_axis", "kind": "def", "category": "class", "info": "get_view_interval"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 682, "name": "get_view_interval", "kind": "def", "category": "function", "info": " def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n else:\n formatter = mpl.ticker.ScalarFormatter()\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 689, "name": "dummy_axis", "kind": "ref", "category": "function", "info": " formatter.axis = dummy_axis()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 694, "name": "set_locs", "kind": "ref", "category": "function", "info": " formatter.set_locs(raw_levels)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 695, "name": "formatter", "kind": "ref", "category": "function", "info": " formatted_levels = [formatter(x) for x in raw_levels]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 700, "name": "relative_luminance", "kind": "def", "category": "function", "info": "def relative_luminance(color):\n \"\"\"Calculate the relative luminance of a color according to W3C standards\n\n Parameters\n ----------\n color : matplotlib color or sequence of matplotlib colors\n Hex code, rgb-tuple, or html color name.\n\n Returns\n -------\n luminance : float(s) between 0 and 1\n\n \"\"\"\n rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)\n lum = rgb.dot([.2126, .7152, .0722])\n try:\n return lum.item()\n except ValueError:\n return lum\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 713, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", 
"line": 717, "name": "item", "kind": "ref", "category": "function", "info": " return lum.item()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 722, "name": "to_utf8", "kind": "def", "category": "function", "info": "def to_utf8(obj):\n \"\"\"Return a string representing a Python object.\n\n Strings (i.e. type ``str``) are returned unchanged.\n\n Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.\n\n For other objects, the method ``__str__()`` is called, and the result is\n returned as a string.\n\n Parameters\n ----------\n obj : object\n Any Python object\n\n Returns\n -------\n s : str\n UTF-8-decoded string representation of ``obj``\n\n \"\"\"\n if isinstance(obj, str):\n return obj\n try:\n return obj.decode(encoding=\"utf-8\")\n except AttributeError: # obj is not bytes-like\n return str(obj)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 746, "name": "decode", "kind": "ref", "category": "function", "info": " return obj.decode(encoding=\"utf-8\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 751, "name": "_normalize_kwargs", "kind": "def", "category": "function", "info": "def _normalize_kwargs(kws, artist):\n \"\"\"Wrapper for mpl.cbook.normalize_kwargs that supports <= 3.2.1.\"\"\"\n _alias_map = {\n 'color': ['c'],\n 'linewidth': ['lw'],\n 'linestyle': ['ls'],\n 'facecolor': ['fc'],\n 'edgecolor': ['ec'],\n 'markerfacecolor': ['mfc'],\n 'markeredgecolor': ['mec'],\n 'markeredgewidth': ['mew'],\n 'markersize': ['ms']\n }\n try:\n kws = normalize_kwargs(kws, artist)\n except AttributeError:\n kws = normalize_kwargs(kws, _alias_map)\n return kws\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 771, "name": "_check_argument", "kind": "def", "category": "function", "info": "def _check_argument(param, options, value):\n \"\"\"Raise if value for param is not in options.\"\"\"\n if value not in options:\n raise ValueError(\n f\"`{param}` must be one of {options}, but {repr(value)} was passed.\"\n )\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 779, "name": "_assign_default_kwargs", "kind": "def", "category": "function", "info": "def _assign_default_kwargs(kws, call_func, source_func):\n \"\"\"Assign default kwargs for call_func using values from source_func.\"\"\"\n # This exists so that axes-level functions and figure-level functions can\n # both call a Plotter method while having the default kwargs be defined in\n # the signature of the axes-level function.\n # An alternative would be to have a decorator on the method that sets its\n # defaults based on those defined in the axes-level function.\n # Then the figure-level function would not need to worry about defaults.\n # I am not sure which is better.\n needed = inspect.signature(call_func).parameters\n defaults = inspect.signature(source_func).parameters\n\n for param in needed:\n if param in defaults and param not in kws:\n kws[param] = defaults[param].default\n\n return kws\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 798, "name": "adjust_legend_subtitles", "kind": "def", "category": "function", 
"info": "def adjust_legend_subtitles(legend):\n \"\"\"Make invisible-handle \"subtitles\" entries look more like titles.\"\"\"\n # Legend title not in rcParams until 3.0\n font_size = plt.rcParams.get(\"legend.title_fontsize\", None)\n hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n for hpack in hpackers:\n draw_area, text_area = hpack.get_children()\n handles = draw_area.get_children()\n if not all(artist.get_visible() for artist in handles):\n draw_area.set_width(0)\n for text in text_area.get_children():\n if font_size is not None:\n text.set_size(font_size)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 802, "name": "get_children", "kind": "ref", "category": "function", "info": " hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 804, "name": "get_children", "kind": "ref", "category": "function", "info": " draw_area, text_area = hpack.get_children()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 805, "name": "get_children", "kind": "ref", "category": "function", "info": " handles = draw_area.get_children()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 806, "name": "get_visible", "kind": "ref", "category": "function", "info": " if not all(artist.get_visible() for artist in handles):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 807, "name": "set_width", "kind": "ref", "category": "function", "info": " draw_area.set_width(0)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 808, "name": "get_children", "kind": "ref", "category": "function", "info": " for text in text_area.get_children():\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 810, "name": "set_size", "kind": "ref", "category": "function", "info": " text.set_size(font_size)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 813, "name": "_deprecate_ci", "kind": "def", "category": "function", "info": "def _deprecate_ci(errorbar, ci):\n \"\"\"\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. 
It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n \"\"\"\n if ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n msg = (\n \"The `ci` parameter is deprecated; \"\n f\"use `errorbar={repr(errorbar)}` for same effect.\"\n )\n warnings.warn(msg, UserWarning)\n\n return errorbar\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 35, "name": "_init_mutable_colormap", "kind": "def", "category": "function", "info": "def _init_mutable_colormap():\n \"\"\"Create a matplotlib colormap that will be updated by the widgets.\"\"\"\n greys = color_palette(\"Greys\", 256)\n cmap = LinearSegmentedColormap.from_list(\"interactive\", greys)\n cmap._init()\n cmap._set_extremes()\n return cmap\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 37, "name": "color_palette", "kind": "ref", "category": "function", "info": " greys = color_palette(\"Greys\", 256)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 44, "name": "_update_lut", "kind": "def", "category": "function", "info": "def _update_lut(cmap, colors):\n \"\"\"Change the LUT values in a matplotlib colormap in-place.\"\"\"\n cmap._lut[:256] = colors\n cmap._set_extremes()\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 50, "name": "_show_cmap", "kind": "def", "category": "function", "info": "def _show_cmap(cmap):\n \"\"\"Show a continuous matplotlib colormap.\"\"\"\n from .rcmod import axes_style # Avoid circular import\n with axes_style(\"white\"):\n f, ax = plt.subplots(figsize=(8.25, .75))\n ax.set(xticks=[], yticks=[])\n x = np.linspace(0, 1, 256)[np.newaxis, :]\n ax.pcolormesh(x, cmap=cmap)\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 53, "name": "axes_style", "kind": "ref", "category": "function", "info": " with axes_style(\"white\"):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 60, "name": "choose_colorbrewer_palette", "kind": "def", "category": "function", "info": "def choose_colorbrewer_palette(data_type, as_cmap=False):\n \"\"\"Select a palette from the ColorBrewer set.\n\n These palettes are built into matplotlib and can be used by name in\n many seaborn functions, or by passing the object returned by this function.\n\n Parameters\n ----------\n data_type : {'sequential', 'diverging', 'qualitative'}\n This describes the kind of data you want to visualize. See the seaborn\n color palette docs for more information about how to choose this value.\n Note that you can pass substrings (e.g. 
'q' for 'qualitative.\n\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette from selected colors.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n\n \"\"\"\n if data_type.startswith(\"q\") and as_cmap:\n raise ValueError(\"Qualitative palettes cannot be colormaps.\")\n\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if data_type.startswith(\"s\"):\n opts = [\"Greys\", \"Reds\", \"Greens\", \"Blues\", \"Oranges\", \"Purples\",\n \"BuGn\", \"BuPu\", \"GnBu\", \"OrRd\", \"PuBu\", \"PuRd\", \"RdPu\", \"YlGn\",\n \"PuBuGn\", \"YlGnBu\", \"YlOrBr\", \"YlOrRd\"]\n variants = [\"regular\", \"reverse\", \"dark\"]\n\n @interact\n def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 97, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 106, "name": "choose_sequential", "kind": "def", "category": "function", "info": " def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == 
\"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 107, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 115, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 116, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 117, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 119, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 120, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 128, "name": "choose_diverging", "kind": "def", "category": "function", "info": " def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 129, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 134, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 135, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 136, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 138, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 139, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 146, "name": "choose_qualitative", "kind": "def", "category": "function", "info": " def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 147, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1)):\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 148, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 149, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 156, "name": "choose_dark_palette", "kind": "def", "category": "function", "info": "def choose_dark_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a dark sequential palette.\n\n This corresponds with the :func:`dark_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`dark_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 189, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 193, "name": "choose_dark_palette_rgb", "kind": "def", "category": "function", "info": " def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 199, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"rgb\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": 
"seaborn/widgets.py", "line": 200, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 201, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 203, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 204, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 208, "name": "choose_dark_palette_hls", "kind": "def", "category": "function", "info": " def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 214, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 215, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 216, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 218, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 219, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 223, "name": "choose_dark_palette_husl", "kind": "def", "category": "function", "info": " def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 229, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 230, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 231, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 233, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 234, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 241, "name": "choose_light_palette", "kind": "def", "category": "function", "info": "def choose_light_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a light sequential palette.\n\n This corresponds with the :func:`light_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`light_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n dark_palette : Create a sequential palette with dark low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 274, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 278, "name": "choose_light_palette_rgb", "kind": "def", "category": "function", "info": " def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 284, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"rgb\")\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 285, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 286, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 288, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 289, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 293, "name": "choose_light_palette_hls", "kind": "def", "category": "function", "info": " def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 299, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 300, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 301, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 303, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 304, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 308, "name": "choose_light_palette_husl", "kind": "def", "category": "function", "info": " def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = 
light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 314, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 315, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 316, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 318, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 319, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 326, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": "def choose_diverging_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to choose a diverging color palette.\n\n This corresponds with the :func:`diverging_palette` function. This kind\n of palette is good for data that range between interesting low values\n and interesting high values with a meaningful midpoint. 
(For example,\n change scores relative to some baseline value).\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n diverging_palette : Create a diverging color palette or colormap.\n choose_colorbrewer_palette : Interactively choose palettes from the\n colorbrewer set, including diverging palettes.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 356, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 359, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": " def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 360, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_neg=IntSlider(min=0,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 363, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_pos=IntSlider(min=0,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 366, "name": "IntSlider", "kind": "ref", "category": "function", "info": " s=IntSlider(min=0, max=99, value=74),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 367, "name": "IntSlider", "kind": "ref", "category": "function", "info": " l=IntSlider(min=0, max=99, value=50), # noqa: E741\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 368, "name": "IntSlider", "kind": "ref", "category": "function", "info": " sep=IntSlider(min=1, max=50, value=10),\n"}, 
{"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 373, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 374, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 375, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 377, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 378, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 385, "name": "choose_cubehelix_palette", "kind": "def", "category": "function", "info": "def choose_cubehelix_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to create a sequential cubehelix palette.\n\n This corresponds with the :func:`cubehelix_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values. 
The cubehelix system allows the\n palette to have more hue variance across the range, which can be helpful\n for distinguishing a wider range of values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 415, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 418, "name": "choose_cubehelix", "kind": "def", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 418, "name": "IntSlider", "kind": "ref", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 419, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " start=FloatSlider(min=0, max=3, value=0),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 420, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " rot=FloatSlider(min=-1, max=1, value=.4),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 421, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " gamma=FloatSlider(min=0, max=5, value=1),\n"}, {"fname": 
"playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 422, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " hue=FloatSlider(min=0, max=1, value=.8),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 423, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " light=FloatSlider(min=0, max=1, value=.85),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 424, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " dark=FloatSlider(min=0, max=1, value=.15),\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 428, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = cubehelix_palette(256, start, rot, gamma,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 430, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 431, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 433, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n"}, {"fname": "playground/9fa08688-18a7-4267-b745-e2506fac0a62/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 435, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}] \ No newline at end of file diff --git a/tags_mwaskom__seaborn-3010.json b/tags_mwaskom__seaborn-3010.json new file mode 100644 index 0000000000000000000000000000000000000000..1acff98b0a2cb0e038c5cf64b6fd174a679934fc --- /dev/null +++ b/tags_mwaskom__seaborn-3010.json @@ -0,0 +1 @@ +[{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/ci/check_gallery.py", "rel_fname": "ci/check_gallery.py", "line": 12, "name": "read", "kind": "ref", "category": "function", "info": " exec(fid.read())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 18, "name": "abspath", "kind": "ref", "category": "function", "info": "sys.path.insert(0, os.path.abspath('sphinxext'))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 125, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(path):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 21, "name": "execfile", "kind": "def", "category": "function", "info": "def execfile(filename, globals=None, locals=None):\n with open(filename, \"rb\") as fp:\n exec(compile(fp.read(), filename, 'exec'), globals, locals)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 127, 
"name": "create_thumbnail", "kind": "def", "category": "function", "info": "def create_thumbnail(infile, thumbfile,\n width=275, height=275,\n cx=0.5, cy=0.5, border=4):\n baseout, extout = op.splitext(thumbfile)\n\n im = matplotlib.image.imread(infile)\n rows, cols = im.shape[:2]\n x0 = int(cx * cols - .5 * width)\n y0 = int(cy * rows - .5 * height)\n xslice = slice(x0, x0 + width)\n yslice = slice(y0, y0 + height)\n thumb = im[yslice, xslice]\n thumb[:border, :, :3] = thumb[-border:, :, :3] = 0\n thumb[:, :border, :3] = thumb[:, -border:, :3] = 0\n\n dpi = 100\n fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)\n\n ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n frameon=False, xticks=[], yticks=[])\n if all(thumb.shape):\n ax.imshow(thumb, aspect='auto', resample=True,\n interpolation='bilinear')\n else:\n warnings.warn(\n f\"Bad thumbnail crop. {thumbfile} will be empty.\"\n )\n fig.savefig(thumbfile, dpi=dpi)\n return fig\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 145, "name": "add_axes", "kind": "ref", "category": "function", "info": " ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 158, "name": "indent", "kind": "def", "category": "function", "info": "def indent(s, N=4):\n \"\"\"indent a string\"\"\"\n return s.replace('\\n', '\\n' + N * ' ')\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 163, "name": "ExampleGenerator", "kind": "def", "category": "class", "info": "__init__\tdirname\tfname\tmodulename\tpyfilename\trstfilename\thtmlfilename\tpngfilename\tthumbfilename\tsphinxtag\tpagetitle\tplotfunc\tcomponents\textract_docstring\texec_file\ttoctree_entry\tcontents_entry"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 169, "name": "extract_docstring", "kind": "ref", "category": "function", "info": " self.extract_docstring()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 179, "name": "exec_file", "kind": "ref", "category": "function", "info": " self.exec_file()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 188, "name": "fname", "kind": "def", "category": "function", "info": " def fname(self):\n return op.split(self.filename)[1]\n\n @property\n def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def 
plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 192, "name": "modulename", "kind": "def", "category": "function", "info": " def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 196, "name": "pyfilename", "kind": "def", "category": "function", "info": " def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 200, "name": "rstfilename", "kind": "def", "category": "function", "info": " def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 204, "name": "htmlfilename", "kind": "def", "category": "function", "info": " def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 208, "name": "pngfilename", "kind": "def", "category": "function", "info": " def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 213, "name": "thumbfilename", "kind": "def", "category": "function", "info": " def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 218, "name": "sphinxtag", "kind": "def", "category": "function", "info": " def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 222, "name": "pagetitle", "kind": "def", "category": "function", "info": " def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 226, "name": "plotfunc", "kind": "def", "category": "function", "info": " def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 239, "name": "components", "kind": "def", "category": "function", "info": " def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 251, "name": "extract_docstring", "kind": "def", "category": "function", "info": " def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 254, "name": "readlines", "kind": "ref", "category": "function", "info": " lines = open(self.filename).readlines()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 294, "name": "exec_file", "kind": "def", "category": "function", "info": " def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 300, "name": "execfile", "kind": "ref", "category": "function", "info": " execfile(self.filename, my_globals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 310, "name": "create_thumbnail", "kind": "ref", "category": "function", "info": " create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 312, "name": "toctree_entry", "kind": "def", "category": "function", "info": " def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 315, "name": "contents_entry", "kind": "def", "category": "function", "info": " def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 362, "name": "ExampleGenerator", "kind": "ref", "category": "function", "info": " ex = ExampleGenerator(filename, target_dir)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 377, "name": "toctree_entry", "kind": "ref", "category": "function", "info": " toctree += ex.toctree_entry()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 378, "name": "contents_entry", "kind": "ref", "category": "function", "info": " contents += ex.contents_entry()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 391, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect('builder-inited', main)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 48, "name": "main", "kind": "def", "category": "function", "info": "def main(app):\n\n content_yaml = Path(app.builder.srcdir) / \"tutorial.yaml\"\n tutorial_rst = Path(app.builder.srcdir) / \"tutorial.rst\"\n\n tutorial_dir = Path(app.builder.srcdir) / \"tutorial\"\n tutorial_dir.mkdir(exist_ok=True)\n\n with open(content_yaml) as fid:\n sections = yaml.load(fid, yaml.BaseLoader)\n\n for section in sections:\n title = section[\"title\"]\n section[\"header\"] = \"\\n\".join([title, \"-\" * len(title)]) if title else \"\"\n\n env = Environment().from_string(TEMPLATE)\n content = env.render(sections=sections)\n\n with 
open(tutorial_rst, \"w\") as fid:\n fid.write(content)\n\n for section in sections:\n for page in section[\"pages\"]:\n if (\n not (svg_path := tutorial_dir / f\"{page}.svg\").exists()\n or svg_path.stat().st_mtime < Path(__file__).stat().st_mtime\n ):\n write_thumbnail(svg_path, page)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 64, "name": "render", "kind": "ref", "category": "function", "info": " content = env.render(sections=sections)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 67, "name": "write", "kind": "ref", "category": "function", "info": " fid.write(content)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 75, "name": "write_thumbnail", "kind": "ref", "category": "function", "info": " write_thumbnail(svg_path, page)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 78, "name": "write_thumbnail", "kind": "def", "category": "function", "info": "def write_thumbnail(svg_path, page):\n\n with (\n sns.axes_style(\"dark\"),\n sns.plotting_context(\"notebook\"),\n sns.color_palette(\"deep\")\n ):\n fig = globals()[page]()\n for ax in fig.axes:\n ax.set(xticklabels=[], yticklabels=[], xlabel=\"\", ylabel=\"\", title=\"\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fig.tight_layout()\n fig.savefig(svg_path, format=\"svg\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 81, "name": "axes_style", "kind": "ref", "category": "function", "info": " sns.axes_style(\"dark\"),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 82, "name": "plotting_context", "kind": "ref", "category": "function", "info": " sns.plotting_context(\"notebook\"),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 83, "name": "color_palette", "kind": "ref", "category": "function", "info": " sns.color_palette(\"deep\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 90, "name": "tight_layout", "kind": "ref", "category": "function", "info": " fig.tight_layout()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 91, "name": "savefig", "kind": "ref", "category": "function", "info": " fig.savefig(svg_path, format=\"svg\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 94, "name": "introduction", "kind": "def", "category": "function", "info": "def introduction():\n\n tips = sns.load_dataset(\"tips\")\n fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n penguins = sns.load_dataset(\"penguins\")\n\n f = 
mpl.figure.Figure(figsize=(5, 5))\n with sns.axes_style(\"whitegrid\"):\n f.subplots(2, 2)\n\n sns.scatterplot(\n tips, x=\"total_bill\", y=\"tip\", hue=\"sex\", size=\"size\",\n alpha=.75, palette=[\"C0\", \".5\"], legend=False, ax=f.axes[0],\n )\n sns.kdeplot(\n tips.query(\"size != 5\"), x=\"total_bill\", hue=\"size\",\n palette=\"blend:C0,.5\", fill=True, linewidth=.5,\n legend=False, common_norm=False, ax=f.axes[1],\n )\n sns.lineplot(\n fmri, x=\"timepoint\", y=\"signal\", hue=\"event\",\n errorbar=(\"se\", 2), legend=False, palette=[\"C0\", \".5\"], ax=f.axes[2],\n )\n sns.boxplot(\n penguins, x=\"bill_depth_mm\", y=\"species\", hue=\"sex\",\n whiskerprops=dict(linewidth=1.5), medianprops=dict(linewidth=1.5),\n boxprops=dict(linewidth=1.5), capprops=dict(linewidth=0),\n width=.5, palette=[\"C0\", \".8\"], whis=5, ax=f.axes[3],\n )\n f.axes[3].legend_ = None\n for ax in f.axes:\n ax.set(xticks=[], yticks=[])\n return f\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 96, "name": "load_dataset", "kind": "ref", "category": "function", "info": " tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 97, "name": "load_dataset", "kind": "ref", "category": "function", "info": " fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 97, "name": "query", "kind": "ref", "category": "function", "info": " fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 98, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 100, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 101, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 102, "name": "subplots", "kind": "ref", "category": "function", "info": " f.subplots(2, 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 104, "name": "scatterplot", "kind": "ref", "category": "function", "info": " sns.scatterplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 108, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", 
"rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 109, "name": "query", "kind": "ref", "category": "function", "info": " tips.query(\"size != 5\"), x=\"total_bill\", hue=\"size\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 113, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 117, "name": "boxplot", "kind": "ref", "category": "function", "info": " sns.boxplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 129, "name": "function_overview", "kind": "def", "category": "function", "info": "def function_overview():\n\n from matplotlib.patches import FancyBboxPatch\n\n f = mpl.figure.Figure(figsize=(7, 5))\n with sns.axes_style(\"white\"):\n ax = f.subplots()\n f.subplots_adjust(0, 0, 1, 1)\n ax.set_axis_off()\n ax.set(xlim=(0, 1), ylim=(0, 1))\n\n deep = sns.color_palette(\"deep\")\n colors = dict(relational=deep[0], distributions=deep[1], categorical=deep[2])\n dark = sns.color_palette(\"dark\")\n text_colors = dict(relational=dark[0], distributions=dark[1], categorical=dark[2])\n\n functions = dict(\n relational=[\"scatterplot\", \"lineplot\"],\n distributions=[\"histplot\", \"kdeplot\", \"ecdfplot\", \"rugplot\"],\n categorical=[\n \"stripplot\", \"swarmplot\", \"boxplot\", \"violinplot\", \"pointplot\", \"barplot\"\n ],\n )\n pad, w, h = .06, .2, .15\n xs, y = np.arange(0, 1, 1 / 3) + pad * 1.05, .7\n for x, mod in zip(xs, functions):\n color = colors[mod] + (.2,)\n text_color = text_colors[mod]\n ax.add_artist(FancyBboxPatch((x, y), w, h, f\"round,pad={pad}\", color=\"white\"))\n ax.add_artist(FancyBboxPatch(\n (x, y), w, h, f\"round,pad={pad}\",\n linewidth=1, edgecolor=text_color, facecolor=color,\n ))\n ax.text(\n x + w / 2, y + h / 2, f\"{mod[:3]}plot\\n({mod})\",\n ha=\"center\", va=\"center\", size=20, color=text_color\n )\n for i, func in enumerate(functions[mod]):\n x_i, y_i = x + w / 2, y - i * .1 - h / 2 - pad\n xy = x_i - w / 2, y_i - pad / 3\n ax.add_artist(\n FancyBboxPatch(xy, w, h / 4, f\"round,pad={pad / 3}\", color=\"white\")\n )\n ax.add_artist(FancyBboxPatch(\n xy, w, h / 4, f\"round,pad={pad / 3}\",\n linewidth=1, edgecolor=text_color, facecolor=color\n ))\n ax.text(x_i, y_i, func, ha=\"center\", va=\"center\", size=16, color=text_color)\n ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)\n return f\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 133, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(7, 5))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 134, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 135, "name": "subplots", "kind": "ref", "category": "function", "info": " ax = f.subplots()\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 136, "name": "subplots_adjust", "kind": "ref", "category": "function", "info": " f.subplots_adjust(0, 0, 1, 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 137, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 140, "name": "color_palette", "kind": "ref", "category": "function", "info": " deep = sns.color_palette(\"deep\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 142, "name": "color_palette", "kind": "ref", "category": "function", "info": " dark = sns.color_palette(\"dark\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 157, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch((x, y), w, h, f\"round,pad={pad}\", color=\"white\"))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 158, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 162, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 169, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 172, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 176, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(x_i, y_i, func, ha=\"center\", va=\"center\", size=16, color=text_color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 177, "name": "plot", "kind": "ref", "category": "function", "info": " ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 181, "name": "data_structure", "kind": "def", "category": "function", "info": "def data_structure():\n\n f = mpl.figure.Figure(figsize=(7, 5))\n gs = mpl.gridspec.GridSpec(\n figure=f, ncols=6, nrows=2, height_ratios=(1, 20),\n left=0, right=.35, bottom=0, top=.9, wspace=.1, hspace=.01\n )\n colors = [c + (.5,) 
for c in sns.color_palette(\"deep\")]\n f.add_subplot(gs[0, :], facecolor=\".8\")\n for i in range(gs.ncols):\n f.add_subplot(gs[1:, i], facecolor=colors[i])\n\n gs = mpl.gridspec.GridSpec(\n figure=f, ncols=2, nrows=2, height_ratios=(1, 8), width_ratios=(1, 11),\n left=.4, right=1, bottom=.2, top=.8, wspace=.015, hspace=.02\n )\n f.add_subplot(gs[0, 1:], facecolor=colors[2])\n f.add_subplot(gs[1:, 0], facecolor=colors[1])\n f.add_subplot(gs[1, 1], facecolor=colors[0])\n return f\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 183, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(7, 5))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 184, "name": "GridSpec", "kind": "ref", "category": "function", "info": " gs = mpl.gridspec.GridSpec(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 188, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = [c + (.5,) for c in sns.color_palette(\"deep\")]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 189, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[0, :], facecolor=\".8\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 191, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1:, i], facecolor=colors[i])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 193, "name": "GridSpec", "kind": "ref", "category": "function", "info": " gs = mpl.gridspec.GridSpec(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 197, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[0, 1:], facecolor=colors[2])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 198, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1:, 0], facecolor=colors[1])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 199, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1, 1], facecolor=colors[0])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 203, "name": "error_bars", "kind": "def", "category": "function", "info": "def error_bars():\n\n diamonds = sns.load_dataset(\"diamonds\")\n with sns.axes_style(\"whitegrid\"):\n g = sns.catplot(\n diamonds, x=\"carat\", y=\"clarity\", hue=\"clarity\", kind=\"point\",\n errorbar=(\"sd\", .5), join=False, legend=False, facet_kws={\"despine\": 
False},\n palette=\"ch:s=-.2,r=-.2,d=.4,l=.6_r\", scale=.75, capsize=.3,\n )\n g.ax.yaxis.set_inverted(False)\n return g.figure\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 205, "name": "load_dataset", "kind": "ref", "category": "function", "info": " diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 206, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 207, "name": "catplot", "kind": "ref", "category": "function", "info": " g = sns.catplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 212, "name": "set_inverted", "kind": "ref", "category": "function", "info": " g.ax.yaxis.set_inverted(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 218, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 223, "name": "Plot", "kind": "ref", "category": "function", "info": " p = so.Plot(x, y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 226, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps), color=map(str, x)),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 227, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\", pointsize=ps), alpha=x),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 228, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".9\", pointsize=ps, edgewidth=2), edgecolor=x),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 229, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\"), pointsize=x).scale(pointsize=(4, 18)),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 229, "name": "scale", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\"), pointsize=x).scale(pointsize=(4, 18)),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 230, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".9\", edgecolor=\".2\"), edgewidth=x),\n"}, 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 231, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".3\"), marker=map(str, x)),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 232, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".3\", marker=\"x\"), stroke=x),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 235, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 236, "name": "subplots", "kind": "ref", "category": "function", "info": " axs = f.subplots(len(plots))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 238, "name": "on", "kind": "ref", "category": "function", "info": " p.on(ax).plot()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 238, "name": "plot", "kind": "ref", "category": "function", "info": " p.on(ax).plot()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 240, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=ax, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 245, "name": "objects_interface", "kind": "def", "category": "function", "info": "def objects_interface():\n\n f = mpl.figure.Figure(figsize=(5, 4))\n C = sns.color_palette(\"deep\")\n ax = f.subplots()\n fontsize = 22\n rects = [((.135, .50), .69), ((.275, .38), .26), ((.59, .38), .40)]\n for i, (xy, w) in enumerate(rects):\n ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n ax.text(0, .52, \"Plot(data, 'x', 'y', color='var1')\", size=fontsize, color=\".2\")\n ax.text(0, .40, \".add(Dot(alpha=.5), marker='var2')\", size=fontsize, color=\".2\")\n annots = [\n (\"Mapped\\nin all layers\", (.48, .62), (0, 55)),\n (\"Set directly\", (.41, .35), (0, -55)),\n (\"Mapped\\nin this layer\", (.80, .35), (0, -55)),\n ]\n for i, (text, xy, xytext) in enumerate(annots):\n ax.annotate(\n text, xy, xytext,\n textcoords=\"offset points\", fontsize=18, ha=\"center\", va=\"center\",\n arrowprops=dict(arrowstyle=\"->\", linewidth=1.5, color=C[i]), color=C[i],\n )\n ax.set_axis_off()\n f.subplots_adjust(0, 0, 1, 1)\n\n return f\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 247, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 4))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": 
"doc/sphinxext/tutorial_builder.py", "line": 248, "name": "color_palette", "kind": "ref", "category": "function", "info": " C = sns.color_palette(\"deep\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 249, "name": "subplots", "kind": "ref", "category": "function", "info": " ax = f.subplots()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 253, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 253, "name": "Rectangle", "kind": "ref", "category": "function", "info": " ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 254, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(0, .52, \"Plot(data, 'x', 'y', color='var1')\", size=fontsize, color=\".2\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 255, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(0, .40, \".add(Dot(alpha=.5), marker='var2')\", size=fontsize, color=\".2\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 262, "name": "annotate", "kind": "ref", "category": "function", "info": " ax.annotate(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 267, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 268, "name": "subplots_adjust", "kind": "ref", "category": "function", "info": " f.subplots_adjust(0, 0, 1, 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 273, "name": "relational", "kind": "def", "category": "function", "info": "def relational():\n\n mpg = sns.load_dataset(\"mpg\")\n with sns.axes_style(\"ticks\"):\n g = sns.relplot(\n data=mpg, x=\"horsepower\", y=\"mpg\", size=\"displacement\", hue=\"weight\",\n sizes=(50, 500), hue_norm=(2000, 4500), alpha=.75, legend=False,\n palette=\"ch:start=-.5,rot=.7,dark=.3,light=.7_r\",\n )\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 275, "name": "load_dataset", "kind": "ref", "category": "function", "info": " mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 276, "name": 
"axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 277, "name": "relplot", "kind": "ref", "category": "function", "info": " g = sns.relplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 282, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 286, "name": "distributions", "kind": "def", "category": "function", "info": "def distributions():\n\n penguins = sns.load_dataset(\"penguins\").dropna()\n with sns.axes_style(\"white\"):\n g = sns.displot(\n penguins, x=\"flipper_length_mm\", row=\"island\",\n binwidth=4, kde=True, line_kws=dict(linewidth=2), legend=False,\n )\n sns.despine(left=True)\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 288, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 288, "name": "dropna", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 289, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 290, "name": "displot", "kind": "ref", "category": "function", "info": " g = sns.displot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 294, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 295, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 299, "name": "categorical", "kind": "def", "category": "function", "info": "def categorical():\n\n penguins = sns.load_dataset(\"penguins\").dropna()\n with sns.axes_style(\"whitegrid\"):\n g = sns.catplot(\n penguins, x=\"sex\", y=\"body_mass_g\", hue=\"island\", col=\"sex\",\n kind=\"box\", whis=np.inf, legend=False, sharex=False,\n )\n sns.despine(left=True)\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", 
"rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 301, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 301, "name": "dropna", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 302, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 303, "name": "catplot", "kind": "ref", "category": "function", "info": " g = sns.catplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 307, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 308, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 312, "name": "regression", "kind": "def", "category": "function", "info": "def regression():\n\n anscombe = sns.load_dataset(\"anscombe\")\n with sns.axes_style(\"white\"):\n g = sns.lmplot(\n anscombe, x=\"x\", y=\"y\", hue=\"dataset\", col=\"dataset\", col_wrap=2,\n scatter_kws=dict(edgecolor=\".2\", facecolor=\".7\", s=80),\n line_kws=dict(lw=4), ci=None,\n )\n g.set(xlim=(2, None), ylim=(2, None))\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 314, "name": "load_dataset", "kind": "ref", "category": "function", "info": " anscombe = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 315, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 316, "name": "lmplot", "kind": "ref", "category": "function", "info": " g = sns.lmplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 322, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 326, "name": "axis_grids", "kind": "def", "category": "function", "info": "def axis_grids():\n\n penguins = 
sns.load_dataset(\"penguins\").sample(200, random_state=0)\n with sns.axes_style(\"ticks\"):\n g = sns.pairplot(\n penguins.drop(\"flipper_length_mm\", axis=1),\n diag_kind=\"kde\", diag_kws=dict(fill=False),\n plot_kws=dict(s=40, fc=\"none\", ec=\"C0\", alpha=.75, linewidth=.75),\n )\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 328, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").sample(200, random_state=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 328, "name": "sample", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").sample(200, random_state=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 329, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 330, "name": "pairplot", "kind": "ref", "category": "function", "info": " g = sns.pairplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 331, "name": "drop", "kind": "ref", "category": "function", "info": " penguins.drop(\"flipper_length_mm\", axis=1),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 335, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 339, "name": "aesthetics", "kind": "def", "category": "function", "info": "def aesthetics():\n\n f = mpl.figure.Figure(figsize=(5, 5))\n for i, style in enumerate([\"darkgrid\", \"white\", \"ticks\", \"whitegrid\"], 1):\n with sns.axes_style(style):\n ax = f.add_subplot(2, 2, i)\n ax.set(xticks=[0, .25, .5, .75, 1], yticks=[0, .25, .5, .75, 1])\n sns.despine(ax=f.axes[1])\n sns.despine(ax=f.axes[2])\n return f\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 341, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 343, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(style):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 344, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax = f.add_subplot(2, 2, i)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 346, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=f.axes[1])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 347, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=f.axes[2])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 351, "name": "color_palettes", "kind": "def", "category": "function", "info": "def color_palettes():\n\n f = mpl.figure.Figure(figsize=(5, 5))\n palettes = [\"deep\", \"husl\", \"gray\", \"ch:\", \"mako\", \"vlag\", \"icefire\"]\n axs = f.subplots(len(palettes))\n x = np.arange(10)\n for ax, name in zip(axs, palettes):\n cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n ax.pcolormesh(x[None, :], linewidth=.5, edgecolor=\"w\", alpha=.8, cmap=cmap)\n ax.set_axis_off()\n return f\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 353, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 355, "name": "subplots", "kind": "ref", "category": "function", "info": " axs = f.subplots(len(palettes))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 358, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 358, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 359, "name": "pcolormesh", "kind": "ref", "category": "function", "info": " ax.pcolormesh(x[None, :], linewidth=.5, edgecolor=\"w\", alpha=.8, cmap=cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 360, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 364, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect(\"builder-inited\", main)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 365, "name": "connect", "kind": "ref", "category": "function", "info": " app.connect(\"builder-inited\", main)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 9, "name": "line_type", "kind": "def", "category": "function", "info": "def line_type(line):\n\n if line.startswith(\" \"):\n return \"code\"\n else:\n return \"markdown\"\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 17, "name": "add_cell", "kind": "def", "category": "function", "info": "def add_cell(nb, lines, cell_type):\n\n cell_objs = {\n \"code\": nbformat.v4.new_code_cell,\n \"markdown\": nbformat.v4.new_markdown_cell,\n }\n text = \"\\n\".join(lines)\n cell = cell_objs[cell_type](text)\n nb[\"cells\"].append(cell)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 36, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " lines = NumpyDocString(pydoc.getdoc(obj))[\"Examples\"]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 41, "name": "new_notebook", "kind": "ref", "category": "function", "info": " nb = nbformat.v4.new_notebook()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 57, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) != cell_type:\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 60, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 61, "name": "line_type", "kind": "ref", "category": "function", "info": " cell_type = line_type(line)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 64, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) == \"code\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 70, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 72, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f\"docstrings/{name}.ipynb\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 14, "name": "poisson_disc_sample", "kind": "def", "category": "function", "info": "def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):\n \"\"\"Find positions using poisson-disc sampling.\"\"\"\n # See http://bost.ocks.org/mike/algorithms/\n rng = np.random.default_rng(seed)\n uniform = rng.uniform\n randint = rng.integers\n\n # Cache the results\n key = array_radius, pad_radius, seed\n if key in XY_CACHE:\n return 
XY_CACHE[key]\n\n # Start at a fixed point we know will work\n start = np.zeros(d)\n samples = [start]\n queue = [start]\n\n while queue:\n\n # Pick a sample to expand from\n s_idx = randint(len(queue))\n s = queue[s_idx]\n\n for i in range(candidates):\n # Generate a candidate from this sample\n coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n\n # Check the three conditions to accept the candidate\n in_array = np.sqrt(np.sum(coords ** 2)) < array_radius\n in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)\n\n if in_array and in_ring:\n # Accept the candidate\n samples.append(coords)\n queue.append(coords)\n break\n\n if (i + 1) == candidates:\n # We've exhausted the particular sample\n queue.pop(s_idx)\n\n samples = np.array(samples)\n XY_CACHE[key] = samples\n return samples\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 17, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 34, "name": "randint", "kind": "ref", "category": "function", "info": " s_idx = randint(len(queue))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 39, "name": "uniform", "kind": "ref", "category": "function", "info": " coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 60, "name": "logo", "kind": "def", "category": "function", "info": "def logo(\n ax,\n color_kws, ring, ring_idx, edge,\n pdf_means, pdf_sigma, dy, y0, w, h,\n hist_mean, hist_sigma, hist_y0, lw, skip,\n scatter, pad, scale,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 70, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 71, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect('equal')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 79, "name": "gaussian", "kind": "ref", "category": "function", "info": " y = gaussian(x.size, pdf_sigma)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 97, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 104, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(bg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 115, "name": "add_artist", "kind": "ref", "category": "function", "info": " 
ax.add_artist(wedge)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 120, "name": "gaussian", "kind": "ref", "category": "function", "info": " hist_y = gaussian(x.size, hist_sigma)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 133, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " bar.set_clip_path(fg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 138, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " u.set_clip_path(fg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 143, "name": "poisson_disc_sample", "kind": "ref", "category": "function", "info": " xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 153, "name": "get_paths", "kind": "ref", "category": "function", "info": " path = u.get_paths()[0]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "get_transform", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 155, "name": "set_visible", "kind": "ref", "category": "function", "info": " u.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 182, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " color = sns.cubehelix_palette(**kwargs[\"color_kws\"])[color_idx]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 187, "name": "logo", "kind": "ref", "category": "function", "info": " logo(ax, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 194, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 204, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 212, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 222, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 40, "name": "MetadataError", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 44, "name": "pop_recursive", "kind": "def", "category": "function", "info": "def pop_recursive(d, key, default=None):\n \"\"\"dict.pop(key) where `key` is a `.`-delimited list of nested keys.\n >>> d = {'a': {'b': 1, 'c': 2}}\n >>> pop_recursive(d, 'a.c')\n 2\n >>> d\n {'a': {'b': 1}}\n \"\"\"\n nested = key.split('.')\n current = d\n for k in nested[:-1]:\n if hasattr(current, 'get'):\n current = current.get(k, {})\n else:\n return default\n if not hasattr(current, 'pop'):\n return default\n return current.pop(nested[-1], default)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 64, "name": "strip_output", "kind": "def", "category": "function", "info": "def strip_output(nb):\n \"\"\"\n Strip the outputs, execution count/prompt number and miscellaneous\n metadata from a notebook object, unless specified to keep either the\n outputs or counts.\n \"\"\"\n keys = {'metadata': [], 'cell': {'metadata': [\"execution\"]}}\n\n nb.metadata.pop('signature', None)\n nb.metadata.pop('widgets', None)\n\n for field in keys['metadata']:\n pop_recursive(nb.metadata, field)\n\n if 'NB_KERNEL' in os.environ:\n nb.metadata['kernelspec']['name'] = os.environ['NB_KERNEL']\n nb.metadata['kernelspec']['display_name'] = os.environ['NB_KERNEL']\n\n for cell in nb.cells:\n\n if 'outputs' in cell:\n cell['outputs'] = []\n if 'prompt_number' in cell:\n cell['prompt_number'] = None\n if 'execution_count' in cell:\n cell['execution_count'] = None\n\n # Always remove this metadata\n for output_style in ['collapsed', 'scrolled']:\n if output_style in cell.metadata:\n cell.metadata[output_style] = False\n if 'metadata' in cell:\n for field in ['collapsed', 'scrolled', 'ExecuteTime']:\n cell.metadata.pop(field, None)\n for (extra, fields) in keys['cell'].items():\n if extra in cell:\n for field in fields:\n pop_recursive(getattr(cell, extra), field)\n return nb\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 76, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(nb.metadata, field)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 101, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(getattr(cell, extra), field)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 120, "name": "ExecutePreprocessor", "kind": "ref", "category": "function", "info": " ep = ExecutePreprocessor(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 125, "name": "preprocess", "kind": "ref", "category": "function", 
"info": " ep.preprocess(nb, {\"metadata\": {\"path\": basedir}})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 142, "name": "RSTExporter", "kind": "ref", "category": "function", "info": " exp = RSTExporter()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 151, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 151, "name": "TagRemovePreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "ExtractOutputPreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 154, "name": "from_notebook_node", "kind": "ref", "category": "function", "info": " body, resources = exp.from_notebook_node(nb)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 157, "name": "strip_output", "kind": "ref", "category": "function", "info": " nb = strip_output(nb)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 168, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(imdir):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 15, "name": "read", "kind": "ref", "category": "function", "info": " nb = nbformat.read(f, as_version=4)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 21, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "sns.lmplot(\n"}, 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 12, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f, left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 19, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=\"carat\", y=\"price\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "fmri = sns.load_dataset(\"fmri\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 14, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(x=\"timepoint\", y=\"signal\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 10, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "dots = sns.load_dataset(\"dots\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 13, "name": "color_palette", "kind": "ref", "category": "function", "info": "palette = 
sns.color_palette(\"rocket_r\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 16, "name": "relplot", "kind": "ref", "category": "function", "info": "sns.relplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 12, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 18, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Body mass (g)\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 19, "name": "set_title", "kind": "ref", "category": "function", "info": "g.legend.set_title(\"\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\", palette=\"pastel\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 14, "name": "boxplot", "kind": "ref", "category": "function", "info": "sns.boxplot(x=\"day\", y=\"total_bill\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(offset=10, trim=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_violinplots.py", "rel_fname": 
"examples/grouped_violinplots.py", "line": 13, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=tips, x=\"day\", y=\"total_bill\", hue=\"smoker\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": " .get_level_values(\"network\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "corr", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "stack", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "reset_index", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 26, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 35, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 36, "name": "margins", "kind": "ref", "category": "function", "info": "g.ax.margins(.02)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 37, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": "for label in 
g.ax.get_xticklabels():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 38, "name": "set_rotation", "kind": "ref", "category": "function", "info": " label.set_rotation(90)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 40, "name": "set_edgecolor", "kind": "ref", "category": "function", "info": " artist.set_edgecolor(\".7\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(11)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 11, "name": "gamma", "kind": "ref", "category": "function", "info": "x = rs.gamma(2, size=1000)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = -.5 * x + rs.normal(size=1000)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 14, "name": "jointplot", "kind": "ref", "category": "function", "info": "sns.jointplot(x=x, y=y, kind=\"hex\", color=\"#4CB391\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 18, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 27, "name": "set_major_formatter", "kind": "ref", "category": "function", "info": "ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 28, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks([500, 1000, 2000, 5000, 10000])\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 13, "name": "set_xscale", "kind": "ref", "category": "function", "info": "ax.set_xscale(\"log\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 16, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 23, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(x=\"distance\", y=\"method\", data=planets,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(trim=True, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 20, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 28, "name": "pointplot", "kind": "ref", "category": "function", "info": "sns.pointplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 35, "name": "move_legend", "kind": "ref", "category": "function", "info": "sns.move_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 12, 
"name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=planets, x=\"year\", y=\"distance\", marginal_ticks=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 18, "name": "add_axes", "kind": "ref", "category": "function", "info": "cax = g.figure.add_axes([.15, .55, .02, .2])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 21, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 25, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, element=\"step\", color=\"#03012d\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 13, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", rc={\"axes.facecolor\": (0, 0, 0, 0)})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 13, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(1979)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 14, "name": "randn", "kind": "ref", "category": "function", "info": "x = rs.randn(500)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 21, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "pal = sns.cubehelix_palette(10, rot=-.25, light=.7)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 22, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, row=\"g\", hue=\"g\", aspect=15, height=.5, palette=pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 31, "name": "refline", "kind": "ref", "category": "function", "info": "g.refline(y=0, linewidth=2, linestyle=\"-\", color=None, clip_on=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 35, "name": "label", "kind": "def", 
"category": "function", "info": "def label(x, color, label):\n ax = plt.gca()\n ax.text(0, .2, label, fontweight=\"bold\", color=color,\n ha=\"left\", va=\"center\", transform=ax.transAxes)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 47, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 49, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(bottom=True, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 11, "name": "boxenplot", "kind": "ref", "category": "function", "info": "sns.boxenplot(x=\"clarity\", y=\"carat\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rng = np.random.RandomState(0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 16, "name": "multivariate_normal", "kind": "ref", "category": "function", "info": "x, y = rng.multivariate_normal(mean, cov, n).T\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 20, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=x, y=y, s=5, color=\".15\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 21, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(x=x, y=y, bins=50, pthresh=.1, cmap=\"mako\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 22, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(x=x, y=y, levels=5, color=\"w\", linewidths=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 16, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(x=\"age\", y=\"survived\", col=\"sex\", hue=\"sex\", data=df,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(4)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 16, "name": "randint", "kind": "ref", "category": "function", "info": "pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 24, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "grid = sns.FacetGrid(df, col=\"walk\", hue=\"walk\", palette=\"tab20c\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 28, "name": "refline", "kind": "ref", "category": "function", "info": "grid.refline(y=0, linestyle=\":\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(33)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 16, "name": "normal", "kind": "ref", "category": "function", "info": "d = pd.DataFrame(data=rs.normal(size=(100, 26)),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 20, "name": "corr", "kind": "ref", "category": "function", "info": "corr = d.corr()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 29, "name": "diverging_palette", "kind": "ref", "category": "function", "info": "cmap = sns.diverging_palette(230, 20, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 32, "name": "heatmap", "kind": "ref", "category": "function", "info": 
"sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", color_codes=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=mpg, x=\"mpg\", y=\"acceleration\", space=0, ratio=17)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.scatterplot, size=mpg[\"horsepower\"], sizes=(30, 120),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 14, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.rugplot, height=1, color=\"g\", alpha=.6)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 14, "name": "set_aspect", "kind": "ref", "category": "function", "info": "ax.set_aspect(\"equal\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 17, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 18, "name": "query", "kind": "ref", "category": "function", "info": " data=iris.query(\"species != 'versicolor'\"),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_conditional_kde.py", "rel_fname": 
"examples/multiple_conditional_kde.py", "line": 13, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 11, "name": "blend_palette", "kind": "ref", "category": "function", "info": "cmap = sns.blend_palette(colors, input=\"husl\", as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 12, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 20, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"Snoot length (mm)\", \"Snoot depth (mm)\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 11, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(df, diag_sharey=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 12, "name": "map_upper", "kind": "ref", "category": "function", "info": "g.map_upper(sns.scatterplot, s=15)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 13, "name": "map_lower", "kind": "ref", "category": "function", "info": 
"g.map_lower(sns.kdeplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 14, "name": "map_diag", "kind": "ref", "category": "function", "info": "g.map_diag(sns.kdeplot, lw=2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "titanic = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 12, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(titanic, y_vars=\"survived\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 19, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(fig=g.fig, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 34, "name": "grid", "kind": "ref", "category": "function", "info": " ax.xaxis.grid(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 35, "name": "grid", "kind": "ref", "category": "function", "info": " ax.yaxis.grid(True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 37, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", context=\"talk\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 9, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(8)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y1, palette=\"rocket\", ax=ax1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 19, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax1.set_ylabel(\"Sequential\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 23, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y2, palette=\"vlag\", ax=ax2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 25, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax2.set_ylabel(\"Diverging\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 28, "name": "choice", "kind": "ref", "category": "function", "info": "y3 = rs.choice(y1, len(y1), replace=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 29, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y3, palette=\"deep\", ax=ax3)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 31, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax3.set_ylabel(\"Qualitative\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 34, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 11, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(50)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 20, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " cmap = sns.cubehelix_palette(start=s, light=1, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 23, "name": "normal", "kind": "ref", "category": "function", "info": " x, y = rs.normal(size=(2, 50))\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 24, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 31, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 16, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"pastel\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"total\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 21, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"muted\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 22, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"alcohol\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "exercise = sns.load_dataset(\"exercise\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 13, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 18, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 21, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, col=\"speed\", hue=\"speed\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 10, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(x=\"total_bill\", y=\"tip\", data=tips,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(7)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 11, "name": "normal", "kind": "ref", "category": "function", "info": "x = rs.normal(2, 1, 75)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = 2 + 1.5 * x + rs.normal(0, 2, 75)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 15, "name": "residplot", "kind": "ref", "category": "function", "info": "sns.residplot(x=x, y=y, lowess=True, color=\"g\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 14, "name": "relplot", "kind": "ref", "category": 
"function", "info": "sns.relplot(x=\"horsepower\", y=\"mpg\", hue=\"origin\", size=\"weight\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\", palette=\"muted\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 14, "name": "swarmplot", "kind": "ref", "category": "function", "info": "ax = sns.swarmplot(data=df, x=\"body_mass_g\", y=\"sex\", hue=\"species\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 10, "name": "pairplot", "kind": "ref", "category": "function", "info": "sns.pairplot(df, hue=\"species\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 13, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "cmap = sns.cubehelix_palette(rot=-.2, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 14, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 21, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.xaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 22, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.yaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/scatterplot_sizes.py", "rel_fname": 
"examples/scatterplot_sizes.py", "line": 23, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 11, "name": "default_rng", "kind": "ref", "category": "function", "info": "rs = np.random.default_rng(0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 13, "name": "normal", "kind": "ref", "category": "function", "info": "d = rs.normal(0, 2, (n, p))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 17, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=d, palette=\"light:g\", inner=\"points\", orient=\"h\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=df, x=\"body_mass_g\", y=\"bill_depth_mm\", space=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.kdeplot,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 15, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, color=\"#03051A\", alpha=1, bins=25)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights_long = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 11, "name": "pivot", "kind": "ref", "category": "function", "info": "flights = flights_long.pivot(\"month\", \"year\", \"passengers\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 15, "name": "heatmap", "kind": "ref", "category": "function", "info": "sns.heatmap(flights, annot=True, fmt=\"d\", linewidths=.5, ax=ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 10, "name": "catplot", "kind": "ref", "category": "function", "info": "sns.catplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 14, "name": "regplot", "kind": "ref", "category": "function", "info": "sns.regplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 21, "name": "husl_palette", "kind": "ref", "category": "function", "info": "network_pal = sns.husl_palette(8, s=.45)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 25, "name": "get_level_values", "kind": "ref", "category": "function", "info": "networks = df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "clustermap", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "corr", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 11, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 13, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 24, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(.8, .85, year, transform=ax.transAxes, fontweight=\"bold\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 27, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "get_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 36, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 37, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Passengers\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 38, "name": "tight_layout", "kind": "ref", "category": "function", "info": "g.tight_layout()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 12, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(365)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 13, "name": "randn", "kind": "ref", "category": "function", "info": "values = rs.randn(365, 4).cumsum(axis=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 16, "name": "rolling", "kind": "ref", "category": "function", "info": "data = data.rolling(7).mean()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 18, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(data=data, palette=\"tab10\", linewidth=2.5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "corr", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "groupby", "kind": "ref", "category": "function", "info": "corr_df = 
df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "mean", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 22, "name": "astype", "kind": "ref", "category": "function", "info": "corr_df.index = corr_df.index.astype(int)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 23, "name": "sort_index", "kind": "ref", "category": "function", "info": "corr_df = corr_df.sort_index().T\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 33, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 5, "name": "MarkerStyle", "kind": "def", "category": "function", "info": "def MarkerStyle(marker=None, fillstyle=None):\n \"\"\"\n Allow MarkerStyle to accept a MarkerStyle object as parameter.\n\n Supports matplotlib < 3.3.0\n https://github.com/matplotlib/matplotlib/pull/16692\n\n \"\"\"\n if isinstance(marker, mpl.markers.MarkerStyle):\n if fillstyle is None:\n return marker\n else:\n marker = marker.get_marker()\n return mpl.markers.MarkerStyle(marker, fillstyle)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 17, "name": "get_marker", "kind": "ref", "category": "function", "info": " marker = marker.get_marker()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 18, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return mpl.markers.MarkerStyle(marker, fillstyle)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 21, "name": "norm_from_scale", "kind": "def", "category": "function", "info": "def norm_from_scale(scale, norm):\n \"\"\"Produce a Normalize object given a Scale and min/max domain limits.\"\"\"\n # This is an internal maplotlib function that simplifies things to access\n # It is likely to become part of the matplotlib API at some point:\n # https://github.com/matplotlib/matplotlib/issues/20329\n if isinstance(norm, mpl.colors.Normalize):\n return norm\n\n if scale is None:\n return None\n\n if norm is None:\n vmin = vmax = None\n else:\n vmin, vmax = norm # TODO more helpful error if this fails?\n\n class ScaledNorm(mpl.colors.Normalize):\n\n def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # 
***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 37, "name": "ScaledNorm", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 39, "name": "__call__", "kind": "def", "category": "function", "info": " def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # ***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 42, "name": "process_value", "kind": "ref", "category": "function", "info": " value, is_scalar = self.process_value(value)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 43, "name": "autoscale_None", "kind": "ref", "category": "function", "info": " self.autoscale_None(value)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 53, "name": "transform", "kind": "ref", "category": "function", "info": " t_value = self.transform(value).reshape(np.shape(value))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 54, "name": "transform", "kind": "ref", "category": "function", "info": " t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 60, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " t_value = np.ma.masked_invalid(t_value, copy=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 63, "name": "ScaledNorm", "kind": "ref", "category": "function", "info": " new_norm = ScaledNorm(vmin, 
vmax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 64, "name": "get_transform", "kind": "ref", "category": "function", "info": " new_norm.transform = scale.get_transform().transform\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 69, "name": "scale_factory", "kind": "def", "category": "function", "info": "def scale_factory(scale, axis, **kwargs):\n \"\"\"\n Backwards compatability for creation of independent scales.\n\n Matplotlib scales require an Axis object for instantiation on < 3.4.\n But the axis is not used, aside from extraction of the axis_name in LogScale.\n\n \"\"\"\n modify_transform = False\n if Version(mpl.__version__) < Version(\"3.4\"):\n if axis[0] in \"xy\":\n modify_transform = True\n axis = axis[0]\n base = kwargs.pop(\"base\", None)\n if base is not None:\n kwargs[f\"base{axis}\"] = base\n nonpos = kwargs.pop(\"nonpositive\", None)\n if nonpos is not None:\n kwargs[f\"nonpos{axis}\"] = nonpos\n\n if isinstance(scale, str):\n class Axis:\n axis_name = axis\n axis = Axis()\n\n scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n\n if modify_transform:\n transform = scale.get_transform()\n transform.base = kwargs.get(\"base\", 10)\n if kwargs.get(\"nonpositive\") == \"mask\":\n # Setting a private attribute, but we only get here\n # on an old matplotlib, so this won't break going forwards\n transform._clip = False\n\n return scale\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 90, "name": "Axis", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 92, "name": "Axis", "kind": "ref", "category": "function", "info": " axis = Axis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 94, "name": "scale_factory", "kind": "ref", "category": "function", "info": " scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 97, "name": "get_transform", "kind": "ref", "category": "function", "info": " transform = scale.get_transform()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 107, "name": "set_scale_obj", "kind": "def", "category": "function", "info": "def set_scale_obj(ax, axis, scale):\n \"\"\"Handle backwards compatability with setting matplotlib scale.\"\"\"\n if Version(mpl.__version__) < Version(\"3.4\"):\n # The ability to pass a BaseScale instance to Axes.set_{}scale was added\n # to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089\n # Workaround: use the 
scale name, which is restrictive only if the user\n # wants to define a custom scale; they'll need to update the registry too.\n if scale.name is None:\n # Hack to support our custom Formatter-less CatScale\n return\n method = getattr(ax, f\"set_{axis}scale\")\n kws = {}\n if scale.name == \"function\":\n trans = scale.get_transform()\n kws[\"functions\"] = (trans._forward, trans._inverse)\n method(scale.name, **kws)\n axis_obj = getattr(ax, f\"{axis}axis\")\n scale.set_default_locators_and_formatters(axis_obj)\n else:\n ax.set(**{f\"{axis}scale\": scale})\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 120, "name": "get_transform", "kind": "ref", "category": "function", "info": " trans = scale.get_transform()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 122, "name": "method", "kind": "ref", "category": "function", "info": " method(scale.name, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 124, "name": "set_default_locators_and_formatters", "kind": "ref", "category": "function", "info": " scale.set_default_locators_and_formatters(axis_obj)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 129, "name": "get_colormap", "kind": "def", "category": "function", "info": "def get_colormap(name):\n \"\"\"Handle changes to matplotlib colormap interface in 3.6.\"\"\"\n try:\n return mpl.colormaps[name]\n except AttributeError:\n return mpl.cm.get_cmap(name)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 134, "name": "get_cmap", "kind": "ref", "category": "function", "info": " return mpl.cm.get_cmap(name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 137, "name": "register_colormap", "kind": "def", "category": "function", "info": "def register_colormap(name, cmap):\n \"\"\"Handle changes to matplotlib colormap interface in 3.6.\"\"\"\n try:\n if name not in mpl.colormaps:\n mpl.colormaps.register(cmap, name=name)\n except AttributeError:\n mpl.cm.register_cmap(name, cmap)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 141, "name": "register", "kind": "ref", "category": "function", "info": " mpl.colormaps.register(cmap, name=name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 143, "name": "register_cmap", "kind": "ref", "category": "function", "info": " mpl.cm.register_cmap(name, cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", 
"rel_fname": "seaborn/_compat.py", "line": 146, "name": "set_layout_engine", "kind": "def", "category": "function", "info": "def set_layout_engine(fig, engine):\n \"\"\"Handle changes to auto layout engine interface in 3.6\"\"\"\n if hasattr(fig, \"set_layout_engine\"):\n fig.set_layout_engine(engine)\n else:\n if engine == \"tight\":\n fig.set_tight_layout(True)\n elif engine == \"constrained\":\n fig.set_constrained_layout(True)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 149, "name": "set_layout_engine", "kind": "ref", "category": "function", "info": " fig.set_layout_engine(engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 152, "name": "set_tight_layout", "kind": "ref", "category": "function", "info": " fig.set_tight_layout(True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 154, "name": "set_constrained_layout", "kind": "ref", "category": "function", "info": " fig.set_constrained_layout(True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 157, "name": "share_axis", "kind": "def", "category": "function", "info": "def share_axis(ax0, ax1, which):\n \"\"\"Handle changes to post-hoc axis sharing.\"\"\"\n if Version(mpl.__version__) < Version(\"3.5.0\"):\n group = getattr(ax0, f\"get_shared_{which}_axes\")()\n group.join(ax1, ax0)\n else:\n getattr(ax1, f\"share{which}\")(ax0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 159, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.5.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 159, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.5.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 16, "name": "PlotData", "kind": "def", "category": "class", "info": "__init__\t__contains__\tjoin\t_assign_variables"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 56, "name": "_assign_variables", "kind": "ref", "category": "function", "info": " frame, names, ids = self._assign_variables(data, variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 92, "name": "PlotData", "kind": "ref", "category": "function", "info": " new = PlotData(data, variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 120, "name": "_assign_variables", "kind": "def", "category": "function", "info": " def _assign_variables(\n self,\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataFrame, dict[str, str | None], dict[str, str | int]]:\n \"\"\"\n Assign values for plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data\n Input data where variable names map to vector values.\n variables\n Keys 
are names of plot variables (x, y, ...) each value is one of:\n\n - name of a column (or index level, or dictionary entry) in `data`\n - vector in any format that can construct a :class:`pandas.DataFrame`\n\n Returns\n -------\n frame\n Table mapping seaborn variables (x, y, color, ...) to data vectors.\n names\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n ids\n Like the `names` dict, but `None` values are replaced by the `id()`\n of the data object that defined the variable.\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in `data`, or when they are\n non-indexed vector datatypes that have a different length from `data`.\n\n \"\"\"\n source_data: dict | DataFrame\n frame: DataFrame\n names: dict[str, str | None]\n ids: dict[str, str | int]\n\n plot_data = {}\n names = {}\n ids = {}\n\n given_data = data is not None\n if given_data:\n source_data = data\n else:\n # Data is optional; all variables can be defined as vectors\n # But simplify downstream code by always having a usable source data object\n source_data = {}\n\n # TODO Generally interested in accepting a generic DataFrame interface\n # Track https://data-apis.org/ for development\n\n # Variables can also be extracted from the index of a DataFrame\n if isinstance(source_data, pd.DataFrame):\n index = source_data.index.to_frame().to_dict(\"series\")\n else:\n index = {}\n\n for key, val in variables.items():\n\n # Simply ignore variables with no specification\n if val is None:\n continue\n\n # Try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow other hashables when\n # taking from the main data object. Allow only strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n\n # TODO this will be rendered unnecessary by the following pandas fix:\n # https://github.com/pandas-dev/pandas/pull/41283\n try:\n hash(val)\n val_is_hashable = True\n except TypeError:\n val_is_hashable = False\n\n val_as_data_key = (\n # See https://github.com/pandas-dev/pandas/pull/41283\n # (isinstance(val, abc.Hashable) and val in source_data)\n (val_is_hashable and val in source_data)\n or (isinstance(val, str) and val in index)\n )\n\n if val_as_data_key:\n\n if val in source_data:\n plot_data[key] = source_data[val]\n elif val in index:\n plot_data[key] = index[val]\n names[key] = ids[key] = str(val)\n\n elif isinstance(val, str):\n\n # This looks like a column name but, lookup failed.\n\n err = f\"Could not interpret value `{val}` for `{key}`. 
\"\n if not given_data:\n err += \"Value is a string, but `data` was not passed.\"\n else:\n err += \"An entry with this name does not appear in `data`.\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value somehow represents data\n\n # Ignore empty data structures\n if isinstance(val, abc.Sized) and len(val) == 0:\n continue\n\n # If vector has no index, it must match length of data table\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if isinstance(val, abc.Sized) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the original name using pandas-like metadata\n if hasattr(val, \"name\"):\n names[key] = ids[key] = str(val.name) # type: ignore # mypy/1424\n else:\n names[key] = None\n ids[key] = id(val)\n\n # Construct a tidy plot DataFrame. This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n # TODO Note: this fails when variable specs *only* have scalars!\n frame = pd.DataFrame(plot_data)\n\n return frame, names, ids\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 178, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = source_data.index.to_frame().to_dict(\"series\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 13, "name": "GroupBy", "kind": "def", "category": "class", "info": "__init__\t_get_groups\t_reorder_columns\tagg\tapply"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 46, "name": "_get_groups", "kind": "def", "category": "function", "info": " def _get_groups(self, data: DataFrame) -> MultiIndex:\n \"\"\"Return index with Cartesian product of ordered grouping variable levels.\"\"\"\n levels = {}\n for var, order in self.order.items():\n if var in data:\n if order is None:\n order = categorical_order(data[var])\n levels[var] = order\n\n grouper: str | list[str]\n groups: Index | MultiIndex | None\n if not levels:\n grouper = []\n groups = None\n elif len(levels) > 1:\n grouper = list(levels)\n groups = pd.MultiIndex.from_product(levels.values(), names=grouper)\n else:\n grouper, = list(levels)\n groups = pd.Index(levels[grouper], name=grouper)\n return grouper, groups\n\n def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, 
sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n group_ids = dict(zip(grouper, key))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 52, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(data[var])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 68, "name": "_reorder_columns", "kind": "def", "category": "function", "info": " def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n group_ids = dict(zip(grouper, key))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 83, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 105, "name": "_get_groups", "kind": "ref", "category": "function", "info": " 
grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 108, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 108, "name": "func", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 112, "name": "func", "kind": "ref", "category": "function", "info": " parts[key] = func(part_df, *args, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 123, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(res, data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 12, "name": "Move", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 24, "name": "Jitter", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 46, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(self.seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 48, "name": "jitter", "kind": "def", "category": "function", "info": " def jitter(data, col, scale):\n noise = rng.uniform(-.5, +.5, len(data))\n offsets = noise * scale\n return data[col] + offsets\n\n if self.width:\n data[orient] = jitter(data, orient, self.width * data[\"width\"])\n if self.x:\n data[\"x\"] = jitter(data, \"x\", self.x)\n if self.y:\n data[\"y\"] = jitter(data, \"y\", self.y)\n\n return data\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 49, "name": "uniform", "kind": "ref", "category": "function", "info": " noise = rng.uniform(-.5, +.5, len(data))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 54, "name": "jitter", "kind": "ref", "category": "function", "info": " data[orient] = jitter(data, orient, self.width * data[\"width\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 56, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"x\"] = jitter(data, \"x\", self.x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 58, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"y\"] = jitter(data, \"y\", self.y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", 
"rel_fname": "seaborn/_core/moves.py", "line": 64, "name": "Dodge", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 85, "name": "groupby_pos", "kind": "def", "category": "function", "info": " def groupby_pos(s):\n grouper = [groups[v] for v in [orient, \"col\", \"row\"] if v in data]\n return s.groupby(grouper, sort=False, observed=True)\n\n def scale_widths(w):\n # TODO what value to fill missing widths??? Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 89, "name": "scale_widths", "kind": "def", "category": "function", "info": " def scale_widths(w):\n # TODO what value to fill missing widths??? Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 100, "name": "widths_to_offsets", "kind": "def", "category": "function", "info": " def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 103, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 104, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 124, "name": "Stack", "kind": "def", "category": "class", "info": "_stack\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 130, "name": "_stack", "kind": "def", "category": "function", "info": " def _stack(self, df, orient):\n\n # TODO should stack do something with ymin/ymax style marks?\n # Should there be an upstream conversion to baseline/height parameterization?\n\n if df[\"baseline\"].nunique() > 1:\n err = \"Stack move cannot be used when baselines are already heterogeneous\"\n raise RuntimeError(err)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n stacked_lengths = (df[other] - df[\"baseline\"]).dropna().cumsum()\n offsets = stacked_lengths.shift(1).fillna(0)\n\n df[other] = stacked_lengths\n df[\"baseline\"] = df[\"baseline\"] + offsets\n\n return df\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n # TODO where to ensure that other semantic variables are sorted properly?\n # TODO why are we not using the passed in groupby here?\n groupers = [\"col\", \"row\", orient]\n return GroupBy(groupers).apply(data, self._stack, orient)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 155, "name": "GroupBy", "kind": "ref", "category": "function", "info": " return GroupBy(groupers).apply(data, self._stack, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 159, "name": "Shift", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 177, "name": "Norm", "kind": "def", "category": "class", "info": "_norm\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 189, "name": "_norm", "kind": "def", "category": "function", "info": " def _norm(self, df, var):\n\n if self.where is None:\n denom_data = df[var]\n else:\n denom_data = df.query(self.where)[var]\n df[var] = df[var] / denom_data.agg(self.func)\n\n if self.percent:\n df[var] = df[var] * 100\n\n return df\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return groupby.apply(data, self._norm, other)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 52, "name": "Layer", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 64, "name": "FacetSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", 
"rel_fname": "seaborn/_core/plot.py", "line": 71, "name": "PairSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 81, "name": "Default", "kind": "def", "category": "class", "info": "__repr__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 86, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 90, "name": "theme_context", "kind": "def", "category": "function", "info": "def theme_context(params: dict[str, Any]) -> Generator:\n \"\"\"Temporarily modify specifc matplotlib rcParams.\"\"\"\n orig_params = {k: mpl.rcParams[k] for k in params}\n color_codes = \"bgrmyck\"\n nice_colors = [*color_palette(\"deep6\"), (.15, .15, .15)]\n orig_colors = [mpl.colors.colorConverter.colors[x] for x in color_codes]\n # TODO how to allow this to reflect the color cycle when relevant?\n try:\n mpl.rcParams.update(params)\n for (code, color) in zip(color_codes, nice_colors):\n mpl.colors.colorConverter.colors[code] = color\n mpl.colors.colorConverter.cache[code] = color\n yield\n finally:\n mpl.rcParams.update(orig_params)\n for (code, color) in zip(color_codes, orig_colors):\n mpl.colors.colorConverter.colors[code] = color\n mpl.colors.colorConverter.cache[code] = color\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 94, "name": "color_palette", "kind": "ref", "category": "function", "info": " nice_colors = [*color_palette(\"deep6\"), (.15, .15, .15)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 110, "name": "build_plot_signature", "kind": "def", "category": "function", "info": "def build_plot_signature(cls):\n \"\"\"\n Decorator function for giving Plot a useful signature.\n\n Currently this mostly saves us some duplicated typing, but we would\n like eventually to have a way of registering new semantic properties,\n at which point dynamic signature generation would become more important.\n\n \"\"\"\n sig = inspect.signature(cls)\n params = [\n inspect.Parameter(\"args\", inspect.Parameter.VAR_POSITIONAL),\n inspect.Parameter(\"data\", inspect.Parameter.KEYWORD_ONLY, default=None)\n ]\n params.extend([\n inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)\n for name in PROPERTIES\n ])\n new_sig = sig.replace(parameters=params)\n cls.__signature__ = new_sig\n\n known_properties = textwrap.fill(\n \", \".join([f\"|{p}|\" for p in PROPERTIES]),\n width=78, subsequent_indent=\" \" * 8,\n )\n\n if cls.__doc__ is not None: # support python -OO mode\n cls.__doc__ = cls.__doc__.format(known_properties=known_properties)\n\n return cls\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 146, "name": "Plot", "kind": "def", "category": "class", "info": "__init__\t_resolve_positionals\t__add__\t_repr_png_\t_clone\t_theme_with_defaults\t_variables\ton\tadd\tpair\tfacet\tscale\tshare\tlimit\tlabel\tlayout\ttheme\tsave\tshow\tplot\t_plot"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 204, "name": "_resolve_positionals", "kind": "ref", "category": "function", "info": " data, variables = self._resolve_positionals(args, data, variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 211, "name": "PlotData", "kind": "ref", "category": "function", "info": " self._data = PlotData(data, variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 230, "name": "_resolve_positionals", "kind": "def", "category": "function", "info": " def _resolve_positionals(\n self,\n args: tuple[DataSource | VariableSpec, ...],\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataSource, dict[str, VariableSpec]]:\n \"\"\"Handle positional arguments, which may contain data / x / y.\"\"\"\n if len(args) > 3:\n err = \"Plot() accepts no more than 3 positional arguments (data, x, y).\"\n raise TypeError(err)\n\n # TODO need some clearer way to differentiate data / vector here\n # (There might be an abstract DataFrame class to use here?)\n if isinstance(args[0], (abc.Mapping, pd.DataFrame)):\n if data is not None:\n raise TypeError(\"`data` given by both name and position.\")\n data, args = args[0], args[1:]\n\n if len(args) == 2:\n x, y = args\n elif len(args) == 1:\n x, y = *args, None\n else:\n x = y = None\n\n for name, var in zip(\"yx\", (y, x)):\n if var is not None:\n if name in variables:\n raise TypeError(f\"`{name}` given by both name and position.\")\n # Keep coordinates at the front of the variables dict\n variables = {name: var, **variables}\n\n return data, variables\n\n def __add__(self, other):\n\n if isinstance(other, Mark) or isinstance(other, Stat):\n raise TypeError(\"Sorry, this isn't ggplot! 
Perhaps try Plot.add?\")\n\n other_type = other.__class__.__name__\n raise TypeError(f\"Unsupported operand type(s) for +: 'Plot' and '{other_type}'\")\n\n def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\",\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 272, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same 
information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\",\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 274, "name": "_repr_png_", "kind": "ref", "category": "function", "info": " return self.plot()._repr_png_()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 278, "name": "_clone", 
"kind": "def", "category": "function", "info": " def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list \"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 280, "name": "Plot", "kind": "ref", "category": "function", "info": " new = Plot()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 304, "name": "_theme_with_defaults", "kind": 
"def", "category": "function", "info": " def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. 
This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. 
Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 317, "name": "axes_style", "kind": "ref", "category": "function", "info": " **axes_style(\"darkgrid\"),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 318, "name": "plotting_context", "kind": "ref", "category": "function", "info": " **plotting_context(\"notebook\"),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 319, "name": "color_palette", "kind": "ref", "category": "function", "info": " \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 325, "name": "_variables", "kind": "def", "category": "function", "info": " def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. 
If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. 
Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 336, "name": "on", "kind": "def", "category": "function", "info": " def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
    def pair(
        self,
        x: VariableSpecList = None,
        y: VariableSpecList = None,
        wrap: int | None = None,
        cross: bool = True,
    ) -> Plot:
        """
        Produce subplots by pairing multiple `x` and/or `y` variables.

        Parameters
        ----------
        x, y : sequence(s) of data vectors or identifiers
            Variables that will define the grid of subplots.
        wrap : int
            When using only `x` or `y`, "wrap" subplots across a two-dimensional grid
            with this many columns (when using `x`) or rows (when using `y`).
        cross : bool
            When False, zip the `x` and `y` lists such that the first subplot gets the
            first pair, the second gets the second pair, etc. Otherwise, create a
            two-dimensional grid from the cartesian product of the lists.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.pair.rst

        """
        # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows
        # This may also be possible by setting `wrap=1`, but is that too non-obvious?
        # TODO PairGrid features not currently implemented: diagonals, corner

        pair_spec: PairSpec = {}

        axes = {"x": [] if x is None else x, "y": [] if y is None else y}
        for axis, arg in axes.items():
            if isinstance(arg, (str, int)):
                err = f"You must pass a sequence of variable keys to `{axis}`"
                raise TypeError(err)

        pair_spec["variables"] = {}
        pair_spec["structure"] = {}

        for axis in "xy":
            keys = []
            for i, col in enumerate(axes[axis]):
                key = f"{axis}{i}"
                keys.append(key)
                pair_spec["variables"][key] = col

            if keys:
                pair_spec["structure"][axis] = keys

        if not cross and len(axes["x"]) != len(axes["y"]):
            err = "Lengths of the `x` and `y` lists must match with cross=False"
            raise ValueError(err)

        pair_spec["cross"] = cross
        pair_spec["wrap"] = wrap

        new = self._clone()
        new._pair_spec.update(pair_spec)
        return new
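A sketch of the two pairing behaviors described above; the data is hypothetical:

import pandas as pd
import seaborn.objects as so

df = pd.DataFrame({"a": [1, 2, 3], "b": [2, 4, 6], "c": [1, 0, 1], "d": [5, 3, 1]})

# Cartesian product (default): a 2x2 grid of subplots
p1 = so.Plot(df).pair(x=["a", "b"], y=["c", "d"]).add(so.Dot())

# Zipped pairing: first subplot gets (a, c), second gets (b, d);
# the lists must be the same length with cross=False
p2 = so.Plot(df).pair(x=["a", "b"], y=["c", "d"], cross=False).add(so.Dot())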
    def facet(
        self,
        col: VariableSpec = None,
        row: VariableSpec = None,
        order: OrderSpec | dict[str, OrderSpec] = None,
        wrap: int | None = None,
    ) -> Plot:
        """
        Produce subplots with conditional subsets of the data.

        Parameters
        ----------
        col, row : data vectors or identifiers
            Variables used to define subsets along the columns and/or rows of the grid.
            Can be references to the global data source passed in the constructor.
        order : list of strings, or dict with dimensional keys
            Define the order of the faceting variables.
        wrap : int
            When using only `col` or `row`, wrap subplots across a two-dimensional
            grid with this many subplots on the faceting dimension.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.facet.rst

        """
        variables = {}
        if col is not None:
            variables["col"] = col
        if row is not None:
            variables["row"] = row

        structure = {}
        if isinstance(order, dict):
            for dim in ["col", "row"]:
                dim_order = order.get(dim)
                if dim_order is not None:
                    structure[dim] = list(dim_order)
        elif order is not None:
            if col is not None and row is not None:
                err = " ".join([
                    "When faceting on both col= and row=, passing `order` as a list",
                    "is ambiguous. Use a dict with 'col' and/or 'row' keys instead.",
                ])
                raise RuntimeError(err)
            elif col is not None:
                structure["col"] = list(order)
            elif row is not None:
                structure["row"] = list(order)

        spec: FacetSpec = {
            "variables": variables,
            "structure": structure,
            "wrap": wrap,
        }

        new = self._clone()
        new._facet_spec.update(spec)

        return new
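A sketch of `Plot.facet`, including the dict form of `order` that the error branch above requires when both dimensions are used; the data and category names are hypothetical:

import pandas as pd
import seaborn.objects as so

df = pd.DataFrame({
    "x": [1, 2, 3, 4, 5, 6],
    "day": ["Thur", "Fri", "Sat", "Thur", "Fri", "Sat"],
    "sex": ["m", "f", "m", "f", "m", "f"],
})

# With both col= and row=, order must be a dict keyed by dimension
p = (
    so.Plot(df, x="x")
    .facet(col="day", row="sex", order={"col": ["Thur", "Fri", "Sat"]})
    .add(so.Bars(), so.Hist())
)

# wrap= reshapes a single faceting dimension into a grid
p2 = so.Plot(df, x="x").facet(col="day", wrap=2).add(so.Bars(), so.Hist())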
    # TODO def twin()?

    def scale(self, **scales: Scale) -> Plot:
        """
        Specify mappings from data units to visual properties.

        Keywords correspond to variables defined in the plot, including coordinate
        variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).

        A number of "magic" arguments are accepted, including:

        - The name of a transform (e.g., `"log"`, `"sqrt"`)
        - The name of a palette (e.g., `"viridis"`, `"muted"`)
        - A tuple of values, defining the output range (e.g. `(1, 5)`)
        - A dict, implying a :class:`Nominal` scale (e.g. `{"a": .2, "b": .5}`)
        - A list of values, implying a :class:`Nominal` scale (e.g. `["b", "r"]`)

        For more explicit control, pass a scale spec object such as :class:`Continuous`
        or :class:`Nominal`. Or use `None` to use an "identity" scale, which treats data
        values as literally encoding visual properties.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.scale.rst

        """
        new = self._clone()
        new._scales.update(scales)
        return new

    def share(self, **shares: bool | str) -> Plot:
        """
        Control sharing of axis limits and ticks across subplots.

        Keywords correspond to variables defined in the plot, and values can be
        boolean (to share across all subplots), or one of "row" or "col" (to share
        more selectively across one dimension of a grid).

        Behavior for non-coordinate variables is currently undefined.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.share.rst

        """
        new = self._clone()
        new._shares.update(shares)
        return new

    def limit(self, **limits: tuple[Any, Any]) -> Plot:
        """
        Control the range of visible data.

        Keywords correspond to variables defined in the plot, and values are a
        `(min, max)` tuple (where either can be `None` to leave unset).

        Limits apply only to the axis; data outside the visible range are
        still used for any stat transforms and added to the plot.

        Behavior for non-coordinate variables is currently undefined.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.limit.rst

        """
        new = self._clone()
        new._limits.update(limits)
        return new

    def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:
        """
        Control the labels and titles for axes, legends, and subplots.

        Additional keywords correspond to variables defined in the plot.
        Values can be one of the following types:

        - string (used literally; pass "" to clear the default label)
        - function (called on the default label)

        For coordinate variables, the value sets the axis label.
        For semantic variables, the value sets the legend title.
        For faceting variables, `title=` modifies the subplot-specific label,
        while `col=` and/or `row=` add a label for the faceting variable.
        When using a single subplot, `title=` sets its title.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.label.rst

        """
        new = self._clone()
        if title is not None:
            new._labels["title"] = title
        new._labels.update(variables)
        return new
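A combined sketch of the scale, share, limit, and label methods above, chained in one specification; the data, column names, and chosen values are hypothetical:

import pandas as pd
import seaborn.objects as so

df = pd.DataFrame({"x": [1, 10, 100], "y": [0.1, 1.0, 10.0], "kind": ["a", "b", "a"]})

p = (
    so.Plot(df, x="x", y="y", color="kind")
    .add(so.Dot())
    .scale(x="log", color="viridis")   # "magic" string arguments
    .share(x=True)                     # share x limits across subplots
    .limit(y=(0, None))                # fix the minimum; maximum stays automatic
    .label(x="Input", color=str.capitalize, title="Demo")
)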
    def layout(
        self,
        *,
        size: tuple[float, float] | Default = default,
        engine: str | None | Default = default,
    ) -> Plot:
        """
        Control the figure size and layout.

        .. note::

            Default figure sizes and the API for specifying the figure size are subject
            to change in future "experimental" releases of the objects API. The default
            layout engine may also change.

        Parameters
        ----------
        size : (width, height)
            Size of the resulting figure, in inches. Size is inclusive of legend when
            using pyplot, but not otherwise.
        engine : {{"tight", "constrained", None}}
            Name of method for automatically adjusting the layout to remove overlap.
            The default depends on whether :meth:`Plot.on` is used.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.layout.rst

        """
        # TODO add an "auto" mode for figsize that roughly scales with the rcParams
        # figsize (so that works), but expands to prevent subplots from being squished
        # Also should we have height=, aspect=, exclusive with figsize? Or working
        # with figsize when only one is defined?

        new = self._clone()

        if size is not default:
            new._figure_spec["figsize"] = size
        if engine is not default:
            new._layout_spec["engine"] = engine

        return new

    # TODO def legend (ugh)

    def theme(self, *args: dict[str, Any]) -> Plot:
        """
        Control the default appearance of elements in the plot.

        .. note::

            The API for customizing plot appearance is not yet finalized.
            Currently, the only valid argument is a dict of matplotlib rc parameters.
            (This dict must be passed as a positional argument.)

            It is likely that this method will be enhanced in future releases.

        Matplotlib rc parameters are documented on the following page:
        https://matplotlib.org/stable/tutorials/introductory/customizing.html

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.theme.rst

        """
        new = self._clone()

        # We can skip this whole block on Python 3.8+ with positional-only syntax
        nargs = len(args)
        if nargs != 1:
            err = f"theme() takes 1 positional argument, but {nargs} were given"
            raise TypeError(err)

        rc = args[0]
        new._theme.update(rc)

        return new

    def save(self, loc, **kwargs) -> Plot:
        """
        Compile the plot and write it to a buffer or file on disk.

        Parameters
        ----------
        loc : str, path, or buffer
            Location on disk to save the figure, or a buffer to write into.
        kwargs
            Other keyword arguments are passed through to
            :meth:`matplotlib.figure.Figure.savefig`.

        """
        # TODO expose important keyword arguments in our signature?
        with theme_context(self._theme_with_defaults()):
            self._plot().save(loc, **kwargs)
        return self

    def show(self, **kwargs) -> None:
        """
        Compile the plot and display it by hooking into pyplot.

        Calling this method is not necessary to render a plot in notebook context,
        but it may be in other environments (e.g., in a terminal). After compiling the
        plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).

        Unlike other :class:`Plot` methods, there is no return value. This should be
        the last method you call when specifying a plot.

        """
        # TODO make pyplot configurable at the class level, and when not using,
        # import IPython.display and call on self to populate cell output?

        # Keep an eye on whether matplotlib implements "attaching" an existing
        # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024

        self.plot(pyplot=True).show(**kwargs)

    def plot(self, pyplot: bool = False) -> Plotter:
        """
        Compile the plot spec and return the Plotter object.
        """
        with theme_context(self._theme_with_defaults()):
            return self._plot(pyplot)

    def _plot(self, pyplot: bool = False) -> Plotter:

        # TODO if we have _target object, pyplot should be determined by whether it
        # is hooked into the pyplot state machine (how do we check?)

        plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())

        # Process the variable assignments and initialize the figure
        common, layers = plotter._extract_data(self)
        plotter._setup_figure(self, common, layers)

        # Process the scale spec for coordinate variables and transform their data
        coord_vars = [v for v in self._variables if re.match(r"^x|y", v)]
        plotter._setup_scales(self, common, layers, coord_vars)

        # Apply statistical transform(s)
        plotter._compute_stats(self, layers)

        # Process scale spec for semantic variables and coordinates computed by stat
        plotter._setup_scales(self, common, layers)

        # TODO Remove these after updating other methods
        # ---- Maybe have debug= param that attaches these when True?
        plotter._data = common
        plotter._layers = layers

        # Process the data for each layer and add matplotlib artists
        for layer in layers:
            plotter._plot_layer(self, layer)

        # Add various figure decorations
        plotter._make_legend(self)
        plotter._finalize_figure(self)

        return plotter

[seaborn/_core/plot.py:377] ref _clone
[seaborn/_core/plot.py:457] ref _clone
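A sketch combining the layout, theme, and save methods shown above; the rc parameters, figure size, and output filename are illustrative choices, not defaults from the source:

import pandas as pd
import seaborn.objects as so

df = pd.DataFrame({"x": [1, 2, 3], "y": [3, 1, 2]})

(
    so.Plot(df, x="x", y="y")
    .add(so.Line())
    .layout(size=(6, 4), engine="constrained")
    .theme({"axes.facecolor": "white", "axes.grid": True})  # one positional dict
    .save("sketch.png", bbox_inches="tight")  # extra kwargs go to Figure.savefig
)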
"category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 471, "name": "pair", "kind": "def", "category": "function", "info": " def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. 
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 530, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 534, "name": "facet", "kind": "def", "category": "function", "info": " def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 590, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 597, "name": "scale", "kind": "def", 
"category": "function", "info": " def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. 
The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 620, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 624, "name": "share", "kind": "def", "category": "function", "info": " def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 639, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 643, "name": "limit", "kind": "def", 
"category": "function", "info": " def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 660, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 664, "name": "label", "kind": "def", 
"category": "function", "info": " def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 686, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 692, "name": "layout", "kind": "def", 
"category": "function", "info": " def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 726, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 737, "name": "theme", "kind": "def", "category": "function", "info": " def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 757, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 770, "name": "save", "kind": "def", 
"category": "function", "info": " def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 784, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 784, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 785, "name": "_plot", "kind": "ref", 
"category": "function", "info": " self._plot().save(loc, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 785, "name": "save", "kind": "ref", "category": "function", "info": " self._plot().save(loc, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 812, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 812, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 813, "name": "_plot", "kind": "ref", "category": "function", "info": " return self._plot(pyplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 815, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 820, "name": "Plotter", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 820, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 823, "name": "_extract_data", "kind": "ref", "category": "function", "info": " common, layers = plotter._extract_data(self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 824, 
"name": "_setup_figure", "kind": "ref", "category": "function", "info": " plotter._setup_figure(self, common, layers)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 827, "name": "match", "kind": "ref", "category": "function", "info": " coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 828, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, common, layers, coord_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 831, "name": "_compute_stats", "kind": "ref", "category": "function", "info": " plotter._compute_stats(self, layers)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 834, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, common, layers)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 843, "name": "_plot_layer", "kind": "ref", "category": "function", "info": " plotter._plot_layer(self, layer)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 846, "name": "_make_legend", "kind": "ref", "category": "function", "info": " plotter._make_legend(self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 847, "name": "_finalize_figure", "kind": "ref", "category": "function", "info": " plotter._finalize_figure(self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 855, "name": "Plotter", "kind": "def", "category": "class", "info": "__init__\tsave\tshow\t_repr_png_\t_extract_data\t_resolve_label\t_setup_figure\t_compute_stats\t_get_scale\t_get_subplot_data\t_setup_scales\t_plot_layer\t_scale_coords\t_unscale_coords\t_generate_pairings\t_get_subplot_index\t_filter_subplot_data\t_setup_split_generator\t_update_legend_contents\t_make_legend\t_finalize_figure"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 876, "name": "save", "kind": "def", "category": "function", "info": " def save(self, loc, **kwargs) -> Plotter: # TODO type args\n kwargs.setdefault(\"dpi\", 96)\n try:\n loc = os.path.expanduser(loc)\n except TypeError:\n # loc may be a buffer in which case that would not work\n pass\n self._figure.savefig(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Display the plot by hooking into pyplot.\n\n This method calls :func:`matplotlib.pyplot.show` with any keyword parameters.\n\n \"\"\"\n # TODO if we did not create the Plotter with pyplot, is it possible to do this?\n # If not we should clearly raise.\n import matplotlib.pyplot as plt\n with theme_context(self._theme):\n plt.show(**kwargs)\n\n # TODO API for accessing the underlying matplotlib objects\n # TODO what else is useful in the public API for this class?\n\n def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n # 
TODO better to do this through a Jupyter hook? e.g.\n # ipy = IPython.core.formatters.get_ipython()\n # fmt = ipy.display_formatter.formatters[\"text/html\"]\n # fmt.for_type(Plot, ...)\n # Would like to have a svg option too, not sure how to make that flexible\n\n # TODO use matplotlib backend directly instead of going through savefig?\n\n # TODO perhaps have self.show() flip a switch to disable this, so that\n # user does not end up with two versions of the figure in the output\n\n # TODO use bbox_inches=\"tight\" like the inline backend?\n # pro: better results, con: (sometimes) confusing results\n # Better solution would be to default (with option to change)\n # to using constrained/tight layout.\n\n # TODO need to decide what the right default behavior here is:\n # - Use dpi=72 to match default InlineBackend figure size?\n # - Accept a generic \"scaling\" somewhere and scale DPI from that,\n # either with 1x -> 72 or 1x -> 96 and the default scaling be .75?\n # - Listen to rcParams? InlineBackend behavior makes that so complicated :(\n # - Do we ever want to *not* use retina mode at this point?\n\n from PIL import Image\n\n dpi = 96\n buffer = io.BytesIO()\n\n with theme_context(self._theme):\n self._figure.savefig(buffer, dpi=dpi * 2, format=\"png\", bbox_inches=\"tight\")\n data = buffer.getvalue()\n\n scaling = .85 / 2\n w, h = Image.open(buffer).size\n metadata = {\"width\": w * scaling, \"height\": h * scaling}\n return data, metadata\n\n def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? 
Maybe using sub{axis}label,\n # although the alignments of the labels from that method leave\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and <margin titles>\n or has_row # TODO and not <margin titles>\n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if 
var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple times, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(v for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n self._scales[var] = scale._setup(var_df[var], prop)\n\n # Everything below here applies only to coordinate variables\n # We additionally skip it when we're working with a value\n # that is derived from a coordinate we've already processed.\n # e.g., the Stat consumed y and added ymin/ymax. In that case,\n # we've already setup the y scale and ymin/max are in scale space.\n if axis is None or (var != coord and coord in p._variables):\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. 
y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if 
data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatibility\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 879, "name": "expanduser", "kind": "ref", "category": "function", "info": " loc = os.path.expanduser(loc)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 896, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 902, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(c for c in layer[\"vars\"] if c not in variables)\n return variables\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Move,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then we'll need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n        \"\"\"\n        variables = {}\n        if col is not None:\n            variables[\"col\"] = col\n        if row is not None:\n            variables[\"row\"] = row\n\n        structure = {}\n        if isinstance(order, dict):\n            for dim in [\"col\", \"row\"]:\n                dim_order = order.get(dim)\n                if dim_order is not None:\n                    structure[dim] = list(dim_order)\n        elif order is not None:\n            if col is not None and row is not None:\n                err = \" \".join([\n                    \"When faceting on both col= and row=, passing `order` as a list\",\n                    \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\",\n                ])\n                raise RuntimeError(err)\n            elif col is not None:\n                structure[\"col\"] = list(order)\n            elif row is not None:\n                structure[\"row\"] = list(order)\n\n        spec: FacetSpec = {\n            \"variables\": variables,\n            \"structure\": structure,\n            \"wrap\": wrap,\n        }\n\n        new = self._clone()\n        new._facet_spec.update(spec)\n\n        return new\n\n    # TODO def twin()?\n\n    def scale(self, **scales: Scale) -> Plot:\n        \"\"\"\n        Specify mappings from data units to visual properties.\n\n        Keywords correspond to variables defined in the plot, including coordinate\n        variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n        A number of \"magic\" arguments are accepted, including:\n            - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n            - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n            - A tuple of values, defining the output range (e.g. `(1, 5)`)\n            - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n            - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n        For more explicit control, pass a scale spec object such as :class:`Continuous`\n        or :class:`Nominal`. Or use `None` to use an \"identity\" scale, which treats data\n        values as literally encoding visual properties.\n\n        Examples\n        --------\n        .. include:: ../docstrings/objects.Plot.scale.rst\n\n        \"\"\"\n        new = self._clone()\n        new._scales.update(scales)\n        return new\n\n    def share(self, **shares: bool | str) -> Plot:\n        \"\"\"\n        Control sharing of axis limits and ticks across subplots.\n\n        Keywords correspond to variables defined in the plot, and values can be\n        boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n        more selectively across one dimension of a grid).\n\n        Behavior for non-coordinate variables is currently undefined.\n\n        Examples\n        --------\n        .. include:: ../docstrings/objects.Plot.share.rst\n\n        \"\"\"\n        new = self._clone()\n        new._shares.update(shares)\n        return new\n\n    def limit(self, **limits: tuple[Any, Any]) -> Plot:\n        \"\"\"\n        Control the range of visible data.\n\n        Keywords correspond to variables defined in the plot, and values are a\n        `(min, max)` tuple (where either can be `None` to leave unset).\n\n        Limits apply only to the axis; data outside the visible range are\n        still used for any stat transforms and added to the plot.\n\n        Behavior for non-coordinate variables is currently undefined.\n\n        Examples\n        --------\n        .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 932, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 934, "name": 
"getvalue", "kind": "ref", "category": "function", "info": " data = buffer.getvalue()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 941, "name": "_extract_data", "kind": "def", "category": "function", "info": " def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) 
so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n 
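# Editor's note (added gloss, not in the original source): share_state is the\n                # share{axis} setting resolved from Plot.share, so it is True/\"all\",\n                # False/\"none\", or a faceting dimension name (\"col\"/\"row\") that may\n                # appear as a column in df; the branches below handle each case.\n                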
# Fully independent axes are also easy: use each subplot's data\n                idx = self._get_subplot_index(df, view)\n            elif share_state in df:\n                # Sharing within row/col is more complicated\n                use_rows = df[share_state] == view[share_state]\n                idx = df.index[use_rows]\n            else:\n                # This configuration doesn't make much sense, but it's fine\n                idx = df.index\n\n            seed_values = df.loc[idx, var]\n\n        return seed_values\n\n    def _setup_scales(\n        self, p: Plot,\n        common: PlotData,\n        layers: list[Layer],\n        variables: list[str] | None = None,\n    ) -> None:\n\n        if variables is None:\n            # Add variables that have data but not a scale, which happens\n            # because this method can be called multiple times, to handle\n            # variables added during the Stat transform.\n            variables = []\n            for layer in layers:\n                variables.extend(layer[\"data\"].frame.columns)\n                for df in layer[\"data\"].frames.values():\n                    variables.extend(v for v in df if v not in variables)\n            variables = [v for v in variables if v not in self._scales]\n\n        for var in variables:\n\n            # Determine whether this is a coordinate variable\n            # (i.e., x/y, paired x/y, or derivative such as xmax)\n            m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n            if m is None:\n                coord = axis = None\n            else:\n                coord = m[\"coord\"]\n                axis = m[\"axis\"]\n\n            # Get keys that handle things like x0, xmax, properly where relevant\n            prop_key = var if axis is None else axis\n            scale_key = var if coord is None else coord\n\n            if prop_key not in PROPERTIES:\n                continue\n\n            # Concatenate layers, using only the relevant coordinate and faceting vars.\n            # This is unnecessarily wasteful, as layer data will often be redundant.\n            # But figuring out the minimal amount we need is more complicated.\n            cols = [var, \"col\", \"row\"]\n            parts = [common.frame.filter(cols)]\n            for layer in layers:\n                parts.append(layer[\"data\"].frame.filter(cols))\n                for df in layer[\"data\"].frames.values():\n                    parts.append(df.filter(cols))\n            var_df = pd.concat(parts, ignore_index=True)\n\n            prop = PROPERTIES[prop_key]\n            scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n            if scale_key not in p._variables:\n                # TODO this implies that the variable was added by the stat\n                # It allows downstream orientation inference to work properly.\n                # But it feels rather hacky, so ideally revisit.\n                scale._priority = 0  # type: ignore\n\n            if axis is None:\n                # We could think about having a broader concept of (un)shared properties\n                # In general, not something you want to do (different scales in facets)\n                # But could make sense e.g. with paired plots. Build later.\n                share_state = None\n                subplots = []\n            else:\n                share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n                subplots = [view for view in self._subplots if view[axis] == coord]\n\n            # Shared categorical axes are broken on matplotlib<3.4.0.\n            # https://github.com/matplotlib/matplotlib/pull/18308\n            # This only affects us when sharing *paired* axes. 
This is a novel/niche\n            # behavior, so we will raise rather than hack together a workaround.\n            if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n                from seaborn._core.scales import Nominal\n                paired_axis = axis in p._pair_spec.get(\"structure\", {})\n                cat_scale = isinstance(scale, Nominal)\n                ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n                shared_axes = share_state not in [False, \"none\", ok_dim]\n                if paired_axis and cat_scale and shared_axes:\n                    err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n                    raise RuntimeError(err)\n\n            if scale is None:\n                self._scales[var] = Scale._identity()\n            else:\n                self._scales[var] = scale._setup(var_df[var], prop)\n\n            # Everything below here applies only to coordinate variables\n            # We additionally skip it when we're working with a value\n            # that is derived from a coordinate we've already processed.\n            # e.g., the Stat consumed y and added ymin/ymax. In that case,\n            # we've already set up the y scale and ymin/max are in scale space.\n            if axis is None or (var != coord and coord in p._variables):\n                continue\n\n            # Set up an empty series to receive the transformed values.\n            # We need this to handle piecemeal transforms of categories -> floats.\n            transformed_data = []\n            for layer in layers:\n                index = layer[\"data\"].frame.index\n                empty_series = pd.Series(dtype=float, index=index, name=var)\n                transformed_data.append(empty_series)\n\n            for view in subplots:\n\n                axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n                seed_values = self._get_subplot_data(var_df, var, view, share_state)\n                view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n                set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n                for layer, new_series in zip(layers, transformed_data):\n                    layer_df = layer[\"data\"].frame\n                    if var in layer_df:\n                        idx = self._get_subplot_index(layer_df, view)\n                        new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n            # Now the transformed data series are complete, update the layer data\n            for layer, new_series in zip(layers, transformed_data):\n                layer_df = layer[\"data\"].frame\n                if var in layer_df:\n                    layer_df[var] = new_series\n\n    def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n        data = layer[\"data\"]\n        mark = layer[\"mark\"]\n        move = layer[\"move\"]\n\n        default_grouping_vars = [\"col\", \"row\", \"group\"]  # TODO where best to define?\n        grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n        pair_variables = p._pair_spec.get(\"structure\", {})\n\n        for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n            orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n            def get_order(var):\n                # Ignore order for x/y: they have been scaled to numeric indices,\n                # so any original order is no longer valid. Default ordering rules\n                # (sorting unique numbers) will correctly reconstruct the intended order\n                # TODO This is tricky, make sure we add some tests for this\n                if var not in \"xy\" and var in scales:\n                    return getattr(scales[var], \"order\", None)\n\n            if \"width\" in mark._mappable_props:\n                width = mark._resolve(df, \"width\", None)\n            else:\n                width = df.get(\"width\", 0.8)  # TODO what default\n            if orient in df:\n                df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n            if \"baseline\" in mark._mappable_props:\n                # TODO what marks should have this?\n                # If we can set baseline with, e.g., Bar(), then the\n                # \"other\" (e.g. 
y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if 
data.frame.empty and data.frames:\n                    out_df = data.frames[(x, y)].copy()\n                else:\n                    out_df = data.frame.copy()\n\n            scales = self._scales.copy()\n            if x in out_df:\n                scales[\"x\"] = self._scales[x]\n            if y in out_df:\n                scales[\"y\"] = self._scales[y]\n\n            for axis, var in zip(\"xy\", (x, y)):\n                if axis != var:\n                    out_df = out_df.rename(columns={var: axis})\n                    cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n                    out_df = out_df.drop(cols, axis=1)\n\n            yield subplots, out_df, scales\n\n    def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n        dims = df.columns.intersection([\"col\", \"row\"])\n        if dims.empty:\n            return df.index\n\n        keep_rows = pd.Series(True, df.index, dtype=bool)\n        for dim in dims:\n            keep_rows &= df[dim] == subplot[dim]\n        return df.index[keep_rows]\n\n    def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n        # TODO note redundancies with preceding function ... needs refactoring\n        dims = df.columns.intersection([\"col\", \"row\"])\n        if dims.empty:\n            return df\n\n        keep_rows = pd.Series(True, df.index, dtype=bool)\n        for dim in dims:\n            keep_rows &= df[dim] == subplot[dim]\n        return df[keep_rows]\n\n    def _setup_split_generator(\n        self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n    ) -> Callable[[], Generator]:\n\n        allow_empty = False  # TODO will need to recreate previous categorical plots\n\n        grouping_keys = []\n        grouping_vars = [\n            v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n        ]\n        for var in grouping_vars:\n            order = getattr(self._scales[var], \"order\", None)\n            if order is None:\n                order = categorical_order(df[var])\n            grouping_keys.append(order)\n\n        def split_generator(keep_na=False) -> Generator:\n\n            for view in subplots:\n\n                axes_df = self._filter_subplot_data(df, view)\n\n                with pd.option_context(\"mode.use_inf_as_null\", True):\n                    if keep_na:\n                        # The simpler thing to do would be x.dropna().reindex(x.index).\n                        # But that doesn't work with the way that the subset iteration\n                        # is written below, which assumes data for grouping vars.\n                        # Matplotlib (usually?) masks nan data, so this should \"work\".\n                        # Downstream code can also drop these rows, at some speed cost.\n                        present = axes_df.notna().all(axis=1)\n                        nulled = {}\n                        for axis in \"xy\":\n                            if axis in axes_df:\n                                nulled[axis] = axes_df[axis].where(present)\n                        axes_df = axes_df.assign(**nulled)\n                    else:\n                        axes_df = axes_df.dropna()\n\n                subplot_keys = {}\n                for dim in [\"col\", \"row\"]:\n                    if view[dim] is not None:\n                        subplot_keys[dim] = view[dim]\n\n                if not grouping_vars or not any(grouping_keys):\n                    yield subplot_keys, axes_df.copy(), view[\"ax\"]\n                    continue\n\n                grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n                for key in itertools.product(*grouping_keys):\n\n                    # Pandas fails with singleton tuple inputs\n                    pd_key = key[0] if len(key) == 1 else key\n\n                    try:\n                        df_subset = grouped_df.get_group(pd_key)\n                    except KeyError:\n                        # TODO (from initial work on categorical plots refactor)\n                        # We are adding this to allow backwards compatibility\n                        # with the empty artists that old categorical plots would\n                        # add (before 0.12), which we may decide to break, in which\n                        # case this option could be removed\n                        df_subset = axes_df.loc[[]]\n\n                    if df_subset.empty and not allow_empty:\n                        continue\n\n                    sub_vars = dict(zip(grouping_vars, key))\n                    sub_vars.update(subplot_keys)\n\n                    # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n                        lo = cast(float, lo) - 0.5\n                    if isinstance(b, str):\n                        hi = cast(float, hi) + 0.5\n                    ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n        engine_default = None if p._target is not None else \"tight\"\n        layout_engine = p._layout_spec.get(\"engine\", engine_default)\n        set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 957, "name": "_resolve_label", "kind": "def", "category": "function", "info": "    def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 963, "name": "manual_label", "kind": "ref", "category": "function", "info": "                label = manual_label(auto_label)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 972, "name": "_setup_figure", "kind": "def", "category": "function", "info": "    def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n        # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n        subplot_spec = p._subplot_spec.copy()\n        facet_spec = p._facet_spec.copy()\n        pair_spec = p._pair_spec.copy()\n\n        for axis in \"xy\":\n            if axis in p._shares:\n                subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n        for dim in [\"col\", \"row\"]:\n            if dim in common.frame and dim not in facet_spec[\"structure\"]:\n                order = categorical_order(common.frame[dim])\n                facet_spec[\"structure\"][dim] = order\n\n        self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n        # --- Figure initialization\n        self._figure = subplots.init_figure(\n            pair_spec, self._pyplot, p._figure_spec, p._target,\n        )\n\n        # --- Figure annotation\n        for sub in subplots:\n            ax = sub[\"ax\"]\n            for axis in \"xy\":\n                axis_key = sub[axis]\n\n                # ~~ Axis labels\n\n                # TODO Should we make it possible to use only one x/y label for\n                # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n                # although the alignments of the labels from that method leaves\n                # something to be desired (in terms of how it defines 'centered').\n                names = [\n                    common.names.get(axis_key),\n                    *(layer[\"data\"].names.get(axis_key) for layer in layers)\n                ]\n                auto_label = next((name for name in names if name is not None), None)\n                label = self._resolve_label(p, axis_key, auto_label)\n                ax.set(**{f\"{axis}label\": label})\n\n                # ~~ Decoration visibility\n\n                # TODO there should be some override (in Plot.layout?) 
so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n 
# Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple times, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(v for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars.\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n self._scales[var] = scale._setup(var_df[var], prop)\n\n # Everything below here applies only to coordinate variables\n # We additionally skip it when we're working with a value\n # that is derived from a coordinate we've already processed.\n # e.g., the Stat consumed y and added ymin/ymax. In that case,\n # we've already setup the y scale and ymin/max are in scale space.\n if axis is None or (var != coord and coord in p._variables):\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. 
y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if 
data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 986, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(common.frame[dim])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 989, "name": "Subplots", "kind": "ref", "category": "function", "info": " self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 992, "name": "init_figure", "kind": "ref", "category": "function", "info": " self._figure = subplots.init_figure(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1013, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " label = self._resolve_label(p, axis_key, auto_label)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1049, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1051, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " key = self._resolve_label(p, dim, common.names.get(dim))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1069, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " title = self._resolve_label(p, \"title\", None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1072, "name": "_compute_stats", "kind": "def", "category": "function", "info": " def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby 
= GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(v for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. 
Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n self._scales[var] = scale._setup(var_df[var], prop)\n\n # Everything below here applies only to coordinate variables\n # We additionally skip it when we're working with a value\n # that is derived from a coordinate we've already processed.\n # e.g., the Stat consumed y and added ymin/ymax. In that case,\n # we've already setup the y scale and ymin/max are in scale space.\n if axis is None or (var != coord and coord in p._variables):\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 
2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib 
accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1108, "name": "match", "kind": "ref", "category": "function", "info": " drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", x)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1112, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1118, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(grouper)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1126, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration 
doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(v for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n self._scales[var] = scale._setup(var_df[var], prop)\n\n # Everything below here applies only to coordinate variables\n # We additionally skip it when we're working with a value\n # that is derived from a coordinate we've already processed.\n # e.g., the Stat consumed y and added ymin/ymax. 
In that case,\n # we've already setup the y scale and ymin/max are in scale space.\n if axis is None or (var != coord and coord in p._variables):\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] 
= self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1135, "name": "infer_scale", "kind": "ref", "category": "function", "info": " scale = prop.infer_scale(arg, values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1137, "name": "default_scale", "kind": "ref", "category": "function", "info": " scale = prop.default_scale(values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1141, "name": "_get_subplot_data", "kind": "def", "category": "function", "info": " def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(v for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in 
orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n self._scales[var] = scale._setup(var_df[var], prop)\n\n # Everything below here applies only to coordinate variables\n # We additionally skip it when we're working with a value\n # that is derived from a coordinate we've already processed.\n # e.g., the Stat consumed y and added ymin/ymax. In that case,\n # we've already setup the y scale and ymin/max are in scale space.\n if axis is None or (var != coord and coord in p._variables):\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 
2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib 
accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1150, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": " idx = self._get_subplot_index(df, view)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1163, "name": "_setup_scales", "kind": "def", "category": "function", "info": " def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(v for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in 
layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n from seaborn._core.scales import Nominal\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n self._scales[var] = scale._setup(var_df[var], prop)\n\n # Everything below here applies only to coordinate variables\n # We additionally skip it when we're working with a value\n # that is derived from a coordinate we've already processed.\n # e.g., the Stat consumed y and added ymin/ymax. 
In that case,\n # we've already setup the y scale and ymin/max are in scale space.\n if axis is None or (var != coord and coord in p._variables):\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n idx = self._get_subplot_index(layer_df, view)\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] 
= self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1185, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^(?P<coord>(?P<axis>x|y)\d*).*\", var)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1211, "name": "_get_scale", "kind": "ref", "category": "function", "info": " scale = self._get_scale(p, scale_key, prop, var_df[var])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1233, "name": "Version", "kind": "ref", "category": "function", "info": " if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1233, "name": "Version", "kind": "ref", "category": "function", "info": " if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1244, "name": "_identity", "kind": "ref", "category": "function", "info": " self._scales[var] = Scale._identity()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1246, "name": "_setup", "kind": "ref", "category": "function", "info": " self._scales[var] = scale._setup(var_df[var], prop)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1267, "name": "_get_subplot_data", "kind": "ref", "category": "function", "info": " seed_values = self._get_subplot_data(var_df, var, view, share_state)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1268, "name": "_setup", "kind": "ref", "category": "function", "info": " view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1269, "name": "set_scale_obj", "kind": "ref", "category": "function", "info": " set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1274, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": " idx = self._get_subplot_index(layer_df, view)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1275, "name": "view_scale", "kind": "ref", "category": "function", "info": " new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1283, "name": "_plot_layer", "kind": "def", "category": 
"function", "info": " def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, 
axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib 
accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1294, "name": "_generate_pairings", "kind": "ref", "category": "function", "info": " for subplots, df, scales in self._generate_pairings(data, pair_variables):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1296, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1298, "name": "get_order", "kind": "def", "category": "function", "info": " def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if \"width\" in mark._mappable_props:\n width = mark._resolve(df, \"width\", None)\n else:\n width = df.get(\"width\", 0.8) # TODO what default\n if orient in df:\n df[\"width\"] = width * scales[orient]._spacing(df[orient])\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = df.get(\"baseline\", 0)\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] 
= self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
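The `_filter_subplot_data` helper above selects the rows belonging to one facet by accumulating a boolean mask over the `col`/`row` columns. A small runnable usage sketch with a toy frame (the data and subplot dict are invented for illustration):

```python
import pandas as pd

def filter_subplot_data(df, subplot):
    # Keep only rows whose facet values match this subplot's col/row keys,
    # mirroring the boolean-mask approach in the code above.
    dims = df.columns.intersection(["col", "row"])
    if dims.empty:
        return df
    keep = pd.Series(True, index=df.index, dtype=bool)
    for dim in dims:
        keep &= df[dim] == subplot[dim]
    return df[keep]

df = pd.DataFrame({"x": [1, 2, 3, 4], "col": ["a", "a", "b", "b"]})
print(filter_subplot_data(df, {"col": "a"}))  # rows 0 and 1 only
```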
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
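The legend-merging step above folds artists from several layers into tuples, relying on matplotlib's ability to render a tuple of artists as a single overlaid legend entry. A self-contained sketch of that matplotlib behavior (passing the handler map explicitly for clarity; the artists here are arbitrary examples):

```python
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerTuple

fig, ax = plt.subplots()
line, = ax.plot([0, 1], [0, 1], color="C0")
scat = ax.scatter([0, 1], [1, 0], color="C1")

# A tuple of artists is accepted as one legend entry and drawn overlaid,
# which is what the tuple-accumulating merge step depends on.
ax.legend([(line, scat)], ["combined"],
          handler_map={tuple: HandlerTuple(ndivide=None)})
plt.show()
```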
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1307, "name": "_resolve", "kind": "ref", "category": "function", "info": " width = mark._resolve(df, \"width\", None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1311, "name": "_spacing", "kind": "ref", "category": "function", "info": " df[\"width\"] = width * scales[orient]._spacing(df[orient])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1318, "name": "_resolve", "kind": "ref", "category": "function", "info": " baseline = mark._resolve(df, \"baseline\", None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1335, "name": "get_order", "kind": "ref", "category": "function", "info": " order = {var: get_order(var) for var in move_groupers}\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1336, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1337, "name": "move_step", "kind": "ref", "category": "function", "info": " df = move_step(df, groupby, orient, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1339, "name": "_unscale_coords", "kind": "ref", "category": "function", "info": " df = self._unscale_coords(subplots, df, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1342, "name": "_setup_split_generator", "kind": "ref", "category": "function", "info": " split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1344, "name": "_plot", "kind": "ref", "category": "function", "info": " mark._plot(split_generator, scales, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1351, "name": "_update_legend_contents", "kind": "ref", "category": "function", "info": " self._update_legend_contents(p, mark, data, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1353, "name": "_scale_coords", "kind": "def", "category": "function", "info": " def _scale_coords(self, subplots: list[dict], df: DataFrame) -> DataFrame:\n # TODO stricter type on subplots\n\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n out_df = (\n df\n .copy(deep=False)\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their 
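The `_finalize_figure` limit handling above converts user-supplied limits through the axis unit machinery and pads string (categorical) limits by half a unit so the outermost categories are not clipped. A minimal sketch of the same idea with plain matplotlib category units (the data is invented):

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.bar(["a", "b", "c"], [1, 3, 2])  # categorical x axis: units map to 0, 1, 2

convert = ax.xaxis.convert_units
lo, hi = convert("a"), convert("c")
# Pad by half a unit on each side, as in the limit handling above
ax.set(xlim=(lo - 0.5, hi + 0.5))
plt.show()
```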
place\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n with pd.option_context(\"mode.use_inf_as_null\", True):\n axes_df = axes_df.dropna()\n for var, values in axes_df.items():\n scale = view[f\"{var[0]}scale\"]\n out_df.loc[values.index, var] = scale(values)\n\n return out_df\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
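The `drop(...).reindex(df.columns, axis=1)` idiom in `_scale_coords` / `_unscale_coords` above deserves a note: dropping the coordinate columns and then reindexing against the original column list reinstates them as empty columns in their original positions, ready to be filled per subplot. A tiny demonstration with toy columns:

```python
import pandas as pd

df = pd.DataFrame({"x": [1, 2], "color": ["r", "g"], "y": [3, 4]})
coord_cols = ["x", "y"]

# Drop the coordinate columns, then reindex on the original columns:
# they return as all-NaN columns in their original order.
out = df.copy(deep=False).drop(coord_cols, axis=1).reindex(df.columns, axis=1)
print(out.columns.tolist())  # ['x', 'color', 'y'] — order preserved
print(out["x"].isna().all())  # True — ready to be filled per subplot
```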
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
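The split generator above iterates group keys in an externally fixed order via `itertools.product`, then pulls each subset with `groupby(...).get_group`, including the singleton-tuple workaround noted in the comment. A compact runnable sketch with invented data and orders:

```python
import itertools
import pandas as pd

df = pd.DataFrame({
    "x": range(6),
    "hue": ["a", "b", "a", "b", "a", "b"],
    "style": ["s", "s", "t", "t", "s", "s"],
})
grouping_vars = ["hue", "style"]
grouping_keys = [["a", "b"], ["s", "t"]]  # fixed, externally defined orders

grouped = df.groupby(grouping_vars, sort=False)
for key in itertools.product(*grouping_keys):
    pd_key = key[0] if len(key) == 1 else key  # pandas rejects singleton tuples
    try:
        subset = grouped.get_group(pd_key)
    except KeyError:
        continue  # no data for this combination of levels
    print(key, len(subset))
```

Driving the iteration from the declared orders (rather than from whatever groups happen to exist) is what keeps subset order stable across subplots.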
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
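The first pass of `_update_legend_contents` above groups legend variables that share the same underlying data id, so two semantics mapped to one column produce a single legend. A stripped-down sketch of that merge; all names (`ids`, `legends`) are hypothetical stand-ins for the scale/legend machinery:

```python
# Hypothetical inputs: a data id per variable, and per-variable legend specs.
ids = {"color": 101, "marker": 101, "size": 202}
legends = {"color": ([1, 2], ["one", "two"]),
           "marker": ([1, 2], ["one", "two"]),
           "size": ([5, 10], ["5", "10"])}

schema = []  # [(data_id, [variables], (values, labels)), ...]
for var, (values, labels) in legends.items():
    for entry in schema:
        if entry[0] == ids[var]:
            entry[1].append(var)  # same underlying data: merge entries
            break
    else:
        schema.append((ids[var], [var], (values, labels)))

print(schema)  # color and marker merged into one entry; size separate
```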
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1356, "name": "match", "kind": "ref", "category": "function", "info": " coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1365, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " view_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1371, "name": "scale", "kind": "ref", "category": "function", "info": " out_df.loc[values.index, var] = scale(values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1375, "name": "_unscale_coords", "kind": "def", "category": "function", "info": " def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{var[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, var] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) 
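The `_generate_pairings` entry above crosses the pair variables for each axis, substituting the bare axis name when an axis is not paired. A two-line sketch of that cross product with invented pair variables:

```python
import itertools

pair_variables = {"x": ["x0", "x1"], "y": ["y0"]}

# Cross the pair variables per axis; an unpaired axis contributes itself.
iter_axes = itertools.product(*[
    pair_variables.get(axis, [axis]) for axis in "xy"
])
print(list(iter_axes))  # [('x0', 'y0'), ('x1', 'y0')]
```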
-> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
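After choosing a pairing, the code above promotes the active pair column to the canonical axis name and drops the remaining numbered columns with a regex. A toy demonstration of that rename/drop step (column names invented):

```python
import re
import pandas as pd

out_df = pd.DataFrame({"x1": [1, 2], "x2": [3, 4], "y": [5, 6]})

# Promote the active pair variable to the canonical axis name, then drop
# the leftover numbered columns for that axis (x2 here).
axis, var = "x", "x1"
if axis != var:
    out_df = out_df.rename(columns={var: axis})
cols = [col for col in out_df if re.match(rf"{axis}\d+", col)]
out_df = out_df.drop(cols, axis=1)
print(out_df.columns.tolist())  # ['x', 'y']
```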
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1379, "name": "match", "kind": "ref", "category": "function", "info": " coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", c)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1389, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " view_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1395, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = axis.get_transform().inverted().transform\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1407, "name": "_generate_pairings", "kind": "def", "category": "function", "info": " def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
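The `keep_na` branch of the split generator above nulls out coordinates in incomplete rows instead of dropping them, so the row index (and hence group membership) stays aligned while matplotlib masks the NaNs. A small sketch of that masking with toy data:

```python
import numpy as np
import pandas as pd

axes_df = pd.DataFrame({"x": [0.0, 1.0, 2.0], "y": [1.0, np.nan, 3.0]})

# Mask coordinates wherever any value in the row is missing, but keep the
# rows themselves so downstream grouping still sees a stable index.
present = axes_df.notna().all(axis=1)
nulled = {axis: axes_df[axis].where(present) for axis in "xy" if axis in axes_df}
axes_df = axes_df.assign(**nulled)
print(axes_df)  # row 1 keeps its index; both x and y become NaN there
```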
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1444, "name": "match", "kind": "ref", "category": "function", "info": " cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", col)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1449, "name": "_get_subplot_index", "kind": "def", "category": "function", "info": " def _get_subplot_index(self, df: DataFrame, subplot: dict) -> DataFrame:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
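The final lines of `_finalize_figure` above pick a layout engine: `"tight"` by default, but none when drawing onto a caller-supplied target, to avoid rearranging a figure the caller owns. A sketch of that default with plain matplotlib (assumes matplotlib >= 3.6 for `Figure.set_layout_engine`; the dump routes through a compat helper named `set_layout_engine`):

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
target = None  # pretend the plot was not directed onto pre-existing axes

# Default to the tight layout engine unless a caller-owned target exists,
# in which case imposing a layout could surprise the caller.
engine = None if target is not None else "tight"
if engine is not None:
    fig.set_layout_engine(engine)
```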
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib 
accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1460, "name": "_filter_subplot_data", "kind": "def", "category": "function", "info": " def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
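The `_make_legend` loop above stacks several per-variable legends into one figure legend by grafting each new legend's packed contents into the first legend's box; the comments themselves call this a hack around the lack of a public API. A standalone sketch of the same grafting (artists invented; relies on the private-ish layout of `Legend.get_children()`):

```python
import matplotlib as mpl
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
h1, = ax.plot([0, 1], label="a", color="C0")
h2, = ax.plot([1, 0], label="b", color="C1")

legend1 = mpl.legend.Legend(fig, [h1], ["a"], title="first", loc="center left")
legend2 = mpl.legend.Legend(fig, [h2], ["b"], title="second", loc="center left")

# Graft the second legend's packed contents into the first legend's
# vertical box so both render as one stacked legend.
box1 = legend1.get_children()[0]
box2 = legend2.get_children()[0]
box1.get_children().extend(box2.get_children())
fig.legends.append(legend1)
plt.show()
```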
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib 
accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += artist[i],\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1471, "name": "_setup_split_generator", "kind": "def", "category": "function", "info": " def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n allow_empty = False # TODO will need to recreate previous categorical plots\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_null\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib 
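When a scale declares no explicit `order`, the generator setup above falls back to `categorical_order` on the data. A rough stand-in for that fallback (the real helper also handles numeric sorting, so this is only an approximation with a hypothetical name):

```python
import pandas as pd

def categorical_order_sketch(s: pd.Series):
    # Use the Categorical dtype's declared order when available,
    # otherwise fall back to order of first appearance.
    if isinstance(s.dtype, pd.CategoricalDtype):
        return list(s.cat.categories)
    return list(pd.unique(s.dropna()))

s = pd.Series(["b", "a", "b", "c"])
print(categorical_order_sketch(s))  # ['b', 'a', 'c']
```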
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1484, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(df[var])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1487, "name": "split_generator", "kind": "def", "category": "function", "info": " def split_generator(keep_na=False) -> Generator:\n [... body omitted: verbatim duplicate of the split_generator source shown inside _setup_split_generator above ...]\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1491, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " axes_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1526, "name": "get_group", "kind": "ref", "category": "function", "info": " df_subset = grouped_df.get_group(pd_key)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1546, "name": "_update_legend_contents", "kind": "def", "category": "function", "info": " def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars = set()\n for frame in data.frames.values():\n legend_vars.update(frame.columns.intersection(scales))\n else:\n legend_vars = data.frame.columns.intersection(scales)\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artists.append(mark._legend_artist(variables, val, scales))\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1576, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " title = self._resolve_label(p, var, data.names[var])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1585, "name": "_legend_artist", "kind": "ref", "category": "function", "info": " artists.append(mark._legend_artist(variables, val, scales))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1590, "name": "_make_legend", "kind": "def", "category": "function", "info": " def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n existing_artists[i] = artist + (artists[i],)\n else:\n existing_artists[i] = artist, artists[i]\n else:\n merged_contents[key] = artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n"},
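On the tuple-handle trick used by _make_legend above: when a legend handle is a tuple of artists, Matplotlib's default HandlerTuple draws them overlaid in a single legend entry, which is how one variable represented by several layers can get one combined key. A minimal sketch (toy artists, not seaborn API):

    import matplotlib.pyplot as plt
    from matplotlib.lines import Line2D

    fig, ax = plt.subplots()
    point = Line2D([], [], linestyle="", marker="o", color="C0")
    line = Line2D([], [], linestyle="-", color="C0")
    # A tuple of artists yields one entry that overlays both handles
    ax.legend([(point, line)], ["layer a + layer b"])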
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1619, "name": "Legend", "kind": "ref", "category": "function", "info": " legend = mpl.legend.Legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1639, "name": "_finalize_figure", "kind": "def", "category": "function", "info": " def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"},
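_finalize_figure's limit handling leans on the axis unit machinery: string limits go through convert_units (so categorical positions come back as floats) and are then padded by half a unit. A small sketch of that logic, assuming Matplotlib's category converter has been engaged by plotting string data:

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot(["a", "b", "c"], [1, 2, 3])     # registers the category converter on x
    lo = ax.xaxis.convert_units("a") - 0.5  # "a" -> 0.0, padded to -0.5
    hi = ax.xaxis.convert_units("c") + 0.5  # "c" -> 2.0, padded to 2.5
    ax.set(xlim=(lo, hi))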
"kind": "ref", "category": "function", "info": " lo = a if a is None else convert_units(a)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1651, "name": "convert_units", "kind": "ref", "category": "function", "info": " hi = b if b is None else convert_units(b)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 46, "name": "Property", "kind": "def", "category": "class", "info": "__init__\tdefault_scale\tinfer_scale\tget_mapping\tstandardize\t_check_dict_entries\t_check_list_length"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 61, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Scale:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n # TODO allow variable_type to be \"boolean\" if that's a scale?\n # TODO how will this handle data with units that can be treated as numeric\n # if passed through a registered matplotlib converter?\n var_type = variable_type(data, boolean_type=\"numeric\")\n if var_type == \"numeric\":\n return Continuous()\n elif var_type == \"datetime\":\n return Temporal()\n # TODO others\n # time-based (TimeStamp, TimeDelta, Period)\n # boolean scale?\n else:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 66, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 68, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 70, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 75, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 77, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n [... remainder omitted: verbatim duplicate of subsequent records ...]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 88, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(trans=arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 97, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n [... remainder omitted: verbatim duplicate of subsequent records ...]\n"},
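The "magic arg" path in infer_scale matches string prefixes, so parameterized transforms like "log2" or "pow3" ride on the same check as the bare names. A tiny sketch of just that prefix test:

    trans_args = ["log", "symlog", "logit", "pow", "sqrt"]

    def accepts_trans(arg: str) -> bool:
        # "log", "log2", "symlog", "pow3", ... all pass; "linear" does not
        return any(arg.startswith(k) for k in trans_args)

    print(accepts_trans("log2"))    # True -> Continuous(trans="log2") in the code above
    print(accepts_trans("linear"))  # False -> ValueError in the code above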
f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 105, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 109, "name": "_check_dict_entries", "kind": "def", "category": "function", "info": " def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", 
"rel_fname": "seaborn/_core/properties.py", "line": 117, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 147, "name": "Coordinate", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 158, "name": "IntervalProperty", "kind": "def", "category": "class", "info": "default_range\t_forward\t_inverse\tinfer_scale\tget_mapping\t_get_categorical_mapping"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 166, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 170, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n [... remainder omitted: verbatim duplicate of subsequent records ...]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 174, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n [... remainder omitted: verbatim duplicate of subsequent records ...]\n"},
"name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 184, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 185, "name": "variable_type", "kind": "ref", "category": "function", "info": " elif variable_type(data) == \"categorical\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 186, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 187, "name": "variable_type", "kind": "ref", "category": 
"function", "info": " elif variable_type(data) == \"datetime\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 188, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 191, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 193, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 198, "name": "_get_categorical_mapping", "kind": "ref", "category": "function", "info": " return self._get_categorical_mapping(scale, data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 201, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(self.default_range)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 203, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(scale.values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 216, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 217, "name": "_inverse", "kind": "ref", "category": "function", "info": " return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 221, "name": "_get_categorical_mapping", "kind": "def", "category": "function", "info": " def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 225, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 228, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 231, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 245, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward([vmin, vmax])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 246, "name": "_inverse", "kind": "ref", "category": "function", "info": " values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 248, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 258, "name": "PointSize", "kind": "def", "category": "class", "info": "_forward\t_inverse"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 262, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values):\n \"\"\"Square native values to implement linear scaling of point area.\"\"\"\n return np.square(values)\n\n def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 266, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 271, "name": "LineWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 274, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n [... remainder omitted: verbatim duplicate of the IntervalProperty method records above ...]\n"},
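The categorical mapping closure above treats its input as level indices: finite entries index into the per-level values with np.take, while nans pass through as nan (the cast of nan to an integer index produces garbage, possibly with a RuntimeWarning, but those positions are masked out by `use`). In isolation:

    import numpy as np

    values = [10.0, 20.0, 30.0]  # one property value per level

    def mapping(x):
        ixs = np.asarray(x, np.intp)   # nan entries cast to junk indices...
        out = np.full(len(x), np.nan)
        use = np.isfinite(x)           # ...but are never read back
        out[use] = np.take(values, ixs[use])
        return out

    print(mapping(np.array([0.0, 2.0, np.nan])))  # [10. 30. nan]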
"info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 280, "name": "EdgeWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 283, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return 
self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 289, "name": "Stroke", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 294, "name": "Alpha", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 305, "name": "ObjectProperty", "kind": "def", "category": "class", "info": "_default_values\tdefault_scale\tinfer_scale\tget_mapping"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 314, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 317, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 318, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 320, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = 
len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 321, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 323, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 328, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 332, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 335, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, 
scale.values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 337, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(n)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 345, "name": "standardize", "kind": "ref", "category": "function", "info": " values = [self.standardize(x) for x in values]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 347, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 357, "name": "Marker", "kind": "def", "category": "class", "info": "standardize\t_default_values"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 359, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " null_value = MarkerStyle(\"\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 366, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: MarkerPattern) -> MarkerStyle:\n return MarkerStyle(val)\n\n def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 
0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 367, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return MarkerStyle(val)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 369, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 396, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " markers = [MarkerStyle(m) for m in markers[:n]]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 401, "name": "LineStyle", "kind": "def", "category": "class", "info": "standardize\t_default_values\t_get_dash_pattern"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 405, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: str | DashPattern) -> DashPatternWithOffset:\n return self._get_dash_pattern(val)\n\n def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 406, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return self._get_dash_pattern(val)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 408, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 449, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return [self._get_dash_pattern(x) for x in dashes]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 452, "name": "_get_dash_pattern", "kind": "def", "category": "function", "info": " def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), 
*ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 503, "name": "Color", "kind": "def", "category": "class", "info": "standardize\t_standardize_color_sequence\tinfer_scale\t_get_categorical_mapping\tget_mapping"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 508, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: ColorSpec) -> RGBTuple | RGBATuple:\n # Return color with alpha channel only if the input spec has it\n # This is so that RGBA colors can override the Alpha property\n if to_rgba(val) != to_rgba(val, 1):\n return to_rgba(val)\n else:\n return to_rgb(val)\n\n def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. 
datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 516, 
"name": "_standardize_color_sequence", "kind": "def", "category": "function", "info": " def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return 
self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 518, "name": "has_alpha", "kind": "def", "category": "function", "info": " def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. 
datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 524, 
"name": "has_alpha", "kind": "ref", "category": "function", "info": " needs_alpha = any(has_alpha(x) for x in colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 531, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 536, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"categorical\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 539, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 546, "name": 
"Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 547, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 550, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 566, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 568, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 571, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 573, "name": "_get_categorical_mapping", "kind": "def", "category": "function", "info": " def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will 
strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 575, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 580, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 584, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 586, "name": "blend_palette", "kind": "ref", "category": "function", "info": " colors = blend_palette(values, n)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 588, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(values, n)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 590, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n <= len(get_color_cycle()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 592, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 594, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 604, "name": "_standardize_color_sequence", "kind": "ref", "category": "function", "info": " colors = self._standardize_color_sequence(colors)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 606, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 615, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 622, "name": "_get_categorical_mapping", "kind": "ref", "category": "function", "info": " return self._get_categorical_mapping(scale, data)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 626, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(\"ch:\", as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 630, "name": "blend_palette", "kind": "ref", "category": "function", "info": " mapping = blend_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 635, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 646, "name": "_mapping", "kind": "def", "category": "function", "info": " def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 650, "name": "mapping", "kind": "ref", "category": "function", "info": " out = mapping(x)[:, :3]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 662, "name": "Fill", "kind": "def", "category": "class", "info": "standardize\t_default_values\tdefault_scale\tinfer_scale\tget_mapping"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 671, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> bool:\n return bool(val)\n\n def _default_values(self, n: int) -> list:\n \"\"\"Return a list of n values, alternating True and False.\"\"\"\n if n > 2:\n msg = \" \".join([\n f\"The variable assigned to {self.variable} has more than two levels,\",\n f\"so {self.variable} values will cycle and may be uninterpretable\",\n ])\n # TODO fire in a \"nice\" way (see above)\n warnings.warn(msg, UserWarning)\n return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]\n\n def default_scale(self, data: Series) -> Nominal:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO infer Boolean where possible?\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps each data value to True or False.\"\"\"\n # TODO categorical_order is going to return [False, True] for booleans,\n # and [0, 1] for binary, but the default values order is [True, False].\n # We should special case this to handle it properly, or change\n # categorical_order to not \"sort\" booleans. 
Note that we need to sync with\n # what's going to happen upstream in the scale, so we can't just do it here.\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n\n if isinstance(scale.values, list):\n values = [bool(x) for x in scale.values]\n elif isinstance(scale.values, dict):\n values = [bool(scale.values[x]) for x in levels]\n elif scale.values is None:\n values = self._default_values(len(levels))\n else:\n msg = \" \".join([\n f\"Scale values for {self.variable} must be passed in\",\n f\"a list or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else False\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 674, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 685, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 687, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 689, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 692, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 694, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that 
maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 704, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 711, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(len(levels))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 719, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 762, 
"name": "cls", "kind": "ref", "category": "function", "info": "PROPERTIES = {var: cls(var) for var, cls in PROPERTY_CLASSES.items()}\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 16, "name": "VarType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 37, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(\n vector: Series,\n boolean_type: Literal[\"numeric\", \"categorical\"] = \"numeric\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 65, "name": "is_categorical_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_categorical_dtype(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 66, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 70, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 86, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(boolean_type)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 89, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 90, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 92, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 93, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 99, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VarType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", 
"rel_fname": "seaborn/_core/rules.py", "line": 105, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 106, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 110, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 116, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 117, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 121, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 124, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector: Series, order: list | None = None) -> list:\n \"\"\"\n Return a list of unique data values using seaborn's ordering rules.\n\n Parameters\n ----------\n vector : Series\n Vector of \"categorical\" values\n order : list\n Desired order of category levels to override the order determined\n from the `data` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is not None:\n return order\n\n if vector.dtype.name == \"category\":\n order = list(vector.cat.categories)\n else:\n order = list(filter(pd.notnull, vector.unique()))\n if variable_type(order) == \"numeric\":\n order.sort()\n\n return order\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 149, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(order) == \"numeric\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 50, "name": "Scale", "kind": "def", "category": "class", "info": "__post_init__\ttick\tlabel\t_get_locators\t_get_formatter\t_get_scale\t_spacing\t_setup\t__call__\t_identity"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 61, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n self._tick_params = None\n self._label_params = None\n self._legend = None\n\n def tick(self):\n raise NotImplementedError()\n\n def 
label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 67, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self):\n raise NotImplementedError()\n\n def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 70, "name": "label", "kind": "def", "category": "function", "info": " def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 73, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 76, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 79, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 81, "name": "_get_locators", "kind": "ref", "category": "function", "info": " major_locator, minor_locator = self._get_locators(**self._tick_params)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 82, "name": "_get_formatter", "kind": "ref", "category": "function", "info": " major_formatter = self._get_formatter(major_locator, **self._label_params)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 84, "name": "InternalScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 91, "name": "InternalScale", "kind": "ref", "category": "function", "info": " return InternalScale(name, (forward, inverse))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 93, "name": "_spacing", "kind": "def", "category": "function", "info": " def _spacing(self, x: Series) -> float:\n return self._spacer(x)\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 94, "name": "_spacer", "kind": "ref", "category": "function", "info": " return self._spacer(x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 96, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 111, "name": "func", "kind": "ref", "category": "function", "info": " data = func(data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 119, "name": "_identity", "kind": "def", "category": "function", "info": " def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 121, "name": "Identity", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 127, "name": "Identity", "kind": "ref", "category": "function", "info": " return Identity()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 131, "name": "Nominal", "kind": "def", "category": "class", "info": "_setup\ttick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 142, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 148, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 150, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 155, "name": "categorical_order", "kind": "ref", "category": "function", "info": " units_seed = categorical_order(data, new.order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 169, "name": "CatScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 179, "name": "CatScale", "kind": "ref", "category": "function", "info": " mpl_scale = CatScale(data.name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 181, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 193, "name": "stringify", "kind": "ref", "category": "function", "info": " axis.update_units(stringify(np.array(units_seed)))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 204, "name": "stringify", "kind": "ref", "category": "function", "info": " out[keep] = axis.convert_units(stringify(x[keep]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 209, "name": "get_mapping", "kind": "ref", "category": "function", "info": " prop.get_mapping(new, data),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 213, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n return 1\n\n new._spacer = spacer\n\n if prop.legend:\n new._legend = units_seed, list(stringify(units_seed))\n\n return new\n\n def tick(self, locator: Locator | None = None):\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n }\n return new\n\n def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 219, "name": "stringify", "kind": "ref", "category": "function", "info": " new._legend = units_seed, list(stringify(units_seed))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 223, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self, locator: Locator | None = None):\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n }\n return new\n\n def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 247, "name": "label", "kind": "def", "category": "function", "info": " def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 272, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 277, "name": "StrCategoryLocator", "kind": "ref", "category": "function", "info": " locator = mpl.category.StrCategoryLocator({})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 281, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 286, "name": "StrCategoryFormatter", "kind": "ref", "category": "function", "info": " formatter = mpl.category.StrCategoryFormatter({})\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 292, "name": "Ordinal", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 298, "name": "Discrete", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 304, "name": "ContinuousBase", "kind": "def", "category": "class", "info": "_setup\t_get_transform"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 309, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n data = np.array([data])\n\n for func in self._pipeline:\n if func is not None:\n data = func(data)\n\n if scalar_data:\n data = data[0]\n\n return data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 315, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 317, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 319, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = new._get_transform()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 321, "name": "_get_scale", "kind": "ref", "category": "function", "info": " mpl_scale = new._get_scale(data.name, forward, inverse)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 324, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 337, "name": "forward", "kind": "ref", "category": "function", "info": " a = forward(vmin)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 338, "name": "forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 338, "name": "forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 340, "name": "normalize", "kind": "def", "category": "function", "info": " def normalize(x):\n return (x - a) / b\n\n else:\n normalize = vmin = vmax = None\n\n new._pipeline = [\n axis.convert_units,\n forward,\n normalize,\n prop.get_mapping(new, data)\n ]\n\n def spacer(x):\n x = x.dropna().unique()\n if len(x) < 2:\n return np.nan\n return np.min(np.diff(np.sort(x)))\n new._spacer = spacer\n\n # TODO How to allow disabling of legend for all uses of property?\n # Could add a Scale parameter, or perhaps Scale.suppress()?\n # Are there other useful parameters that would be in Scale.legend()\n # besides allowing Scale.legend(False)?\n if prop.legend:\n axis.set_view_interval(vmin, vmax)\n locs = axis.major.locator()\n locs = locs[(vmin <= locs) & (locs <= vmax)]\n labels = axis.major.formatter.format_ticks(locs)\n new._legend = list(locs), list(labels)\n\n return new\n\n def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 350, "name": "get_mapping", "kind": "ref", "category": "function", "info": " prop.get_mapping(new, data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 353, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n return 1\n\n new._spacer = spacer\n\n if prop.legend:\n new._legend = units_seed, list(stringify(units_seed))\n\n return new\n\n def tick(self, locator: Locator | None = None):\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n }\n return new\n\n def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 366, "name": "locator", "kind": "ref", "category": "function", "info": " locs = axis.major.locator()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 373, "name": "_get_transform", "kind": "def", "category": "function", "info": " def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 377, "name": "get_param", "kind": "def", "category": "function", "info": " def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 383, "name": "_make_identity_transforms", "kind": "ref", "category": "function", "info": " return _make_identity_transforms()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", 
"rel_fname": "seaborn/_core/scales.py", "line": 388, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 390, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"logit\", 10)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 391, "name": "_make_logit_transforms", "kind": "ref", "category": "function", "info": " return _make_logit_transforms(base)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 393, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"log\", 10)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 394, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms(base)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 396, "name": "get_param", "kind": "ref", "category": "function", "info": " c = get_param(\"symlog\", 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 397, "name": "_make_symlog_transforms", "kind": "ref", "category": "function", "info": " return _make_symlog_transforms(c)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 399, "name": "get_param", "kind": "ref", "category": "function", "info": " exp = get_param(\"pow\", 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 400, "name": "_make_power_transforms", "kind": "ref", "category": "function", "info": " return _make_power_transforms(exp)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 402, "name": "_make_sqrt_transforms", "kind": "ref", "category": "function", "info": " return _make_sqrt_transforms()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 408, "name": "Continuous", "kind": "def", "category": "class", "info": "tick\tlabel\t_parse_for_log_params\t_get_locators\t_get_formatter"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 420, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these 
specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n # Input checks\n if locator is not None and not isinstance(locator, Locator):\n raise TypeError(\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError(\"`count` requires `between` with log transform.\")\n if every is not None:\n raise RuntimeError(\"`every` not supported with log transform.\")\n\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n \"at\": at,\n \"upto\": upto,\n \"count\": count,\n \"every\": every,\n \"between\": between,\n \"minor\": minor,\n }\n return new\n\n def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None = None,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). 
When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | Transforms | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 462, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 481, "name": "label", "kind": "def", "category": "function", "info": " def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None = None,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | Transforms | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif 
symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 531, "name": "_parse_for_log_params", "kind": "def", "category": "function", "info": " def _parse_for_log_params(\n self, trans: str | Transforms | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = 
self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 537, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^log(\\d*)\", trans)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 540, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"symlog(\\d*)\", trans)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 545, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log 
options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 547, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 564, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = self._get_transform()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 565, "name": "forward", "kind": "ref", "category": "function", "info": " lo, hi = forward(between)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 566, "name": "inverse", "kind": "ref", "category": "function", "info": " ticks = inverse(np.linspace(lo, hi, num=count))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 601, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 603, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 642, "name": "Temporal", "kind": "def", "category": "class", "info": "tick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 660, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n 
Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n # Input checks\n if locator is not None and not isinstance(locator, Locator):\n raise TypeError(\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError(\"`count` requires `between` with log transform.\")\n if every is not None:\n raise RuntimeError(\"`every` not supported with log transform.\")\n\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n \"at\": at,\n \"upto\": upto,\n \"count\": count,\n \"every\": every,\n \"between\": between,\n \"minor\": minor,\n }\n return new\n\n def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None = None,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). 
When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | Transforms | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 694, "name": "label", "kind": "def", "category": "function", "info": " def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None = None,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | Transforms | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n 
minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is None:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 723, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator, upto):\n\n if locator is not None:\n major_locator = locator\n elif upto is not None:\n major_locator = AutoDateLocator(minticks=2, maxticks=upto)\n\n else:\n major_locator = AutoDateLocator(minticks=2, maxticks=6)\n minor_locator = None\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, concise):\n\n if formatter is not None:\n return formatter\n\n if concise:\n # TODO ideally we would have concise coordinate ticks,\n # but full semantic ticks. Is that possible?\n formatter = ConciseDateFormatter(locator)\n else:\n formatter = AutoDateFormatter(locator)\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 736, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter, concise):\n\n if formatter is not None:\n return formatter\n\n if concise:\n # TODO ideally we would have concise coordinate ticks,\n # but full semantic ticks. 
Is that possible?\n formatter = ConciseDateFormatter(locator)\n else:\n formatter = AutoDateFormatter(locator)\n\n return formatter\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 769, "name": "PseudoAxis", "kind": "def", "category": "class", "info": "__init__\tset_view_interval\tget_view_interval\tset_data_interval\tget_data_interval\tget_tick_space\tset_major_locator\tset_major_formatter\tset_minor_locator\tset_minor_formatter\tset_units\tupdate_units\tconvert_units\tget_scale\tget_majorticklocs"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 786, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.major = mpl.axis.Ticker()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 787, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.minor = mpl.axis.Ticker()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 839, "name": "get_converter", "kind": "ref", "category": "function", "info": " self.converter = mpl.units.registry.get_converter(x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 841, "name": "default_units", "kind": "ref", "category": "function", "info": " self.converter.default_units(x, self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 843, "name": "axisinfo", "kind": "ref", "category": "function", "info": " info = self.converter.axisinfo(self.units, self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 861, "name": "convert", "kind": "ref", "category": "function", "info": " return self.converter.convert(x, self.units, self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 872, "name": "locator", "kind": "ref", "category": "function", "info": " return self.major.locator()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 879, "name": "_make_identity_transforms", "kind": "def", "category": "function", "info": "def _make_identity_transforms() -> Transforms:\n\n def identity(x):\n return x\n\n return identity, identity\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 887, "name": "_make_logit_transforms", "kind": "def", "category": "function", "info": "def _make_logit_transforms(base: float = None) -> Transforms:\n\n log, exp = _make_log_transforms(base)\n\n def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 889, "name": "_make_log_transforms", "kind": "ref", 
"category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 891, "name": "logit", "kind": "def", "category": "function", "info": " def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 895, "name": "expit", "kind": "def", "category": "function", "info": " def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 902, "name": "_make_log_transforms", "kind": "def", "category": "function", "info": "def _make_log_transforms(base: float | None = None) -> Transforms:\n\n if base is None:\n fs = np.log, np.exp\n elif base == 2:\n fs = np.log2, partial(np.power, 2)\n elif base == 10:\n fs = np.log10, partial(np.power, 10)\n else:\n def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 911, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 926, "name": "_make_symlog_transforms", "kind": "def", "category": "function", "info": "def _make_symlog_transforms(c: float = 1, base: float = 10) -> Transforms:\n\n # From https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001\n\n # Note: currently not using base because we only get\n # one parameter from the string, and are using c (this is consistent with d3)\n\n log, exp = _make_log_transforms(base)\n\n def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 933, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 935, "name": "symlog", "kind": "def", "category": "function", "info": " def symlog(x):\n with np.errstate(invalid=\"ignore\", 
divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 939, "name": "symexp", "kind": "def", "category": "function", "info": " def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 946, "name": "_make_sqrt_transforms", "kind": "def", "category": "function", "info": "def _make_sqrt_transforms() -> Transforms:\n\n def sqrt(x):\n return np.sign(x) * np.sqrt(np.abs(x))\n\n def square(x):\n return np.sign(x) * np.square(x)\n\n return sqrt, square\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 957, "name": "_make_power_transforms", "kind": "def", "category": "function", "info": "def _make_power_transforms(exp: float) -> Transforms:\n\n def forward(x):\n return np.sign(x) * np.power(np.abs(x), exp)\n\n def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 959, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 962, "name": "inverse", "kind": "def", "category": "function", "info": " def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 15, "name": "Subplots", "kind": "def", "category": "class", "info": "__init__\t_check_dimension_uniqueness\t_determine_grid_dimensions\t_handle_wrapping\t_determine_axis_sharing\tinit_figure\t__iter__\t__len__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 40, "name": "_check_dimension_uniqueness", "kind": "ref", "category": "function", "info": " self._check_dimension_uniqueness(facet_spec, pair_spec)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 41, "name": "_determine_grid_dimensions", "kind": "ref", "category": "function", "info": " self._determine_grid_dimensions(facet_spec, pair_spec)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 42, "name": "_handle_wrapping", "kind": "ref", "category": "function", "info": " self._handle_wrapping(facet_spec, pair_spec)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 43, "name": "_determine_axis_sharing", "kind": "ref", "category": "function", "info": " self._determine_axis_sharing(pair_spec)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 45, "name": "_check_dimension_uniqueness", "kind": "def", "category": "function", "info": " def _check_dimension_uniqueness(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Reject specs that pair and facet on (or wrap to) same figure dimension.\"\"\"\n err = None\n\n facet_vars = facet_spec.get(\"variables\", {})\n\n if facet_spec.get(\"wrap\") and {\"col\", \"row\"} <= set(facet_vars):\n err = \"Cannot wrap facets when specifying both `col` and `row`.\"\n elif (\n pair_spec.get(\"wrap\")\n and pair_spec.get(\"cross\", True)\n and len(pair_spec.get(\"structure\", {}).get(\"x\", [])) > 1\n and len(pair_spec.get(\"structure\", {}).get(\"y\", [])) > 1\n ):\n err = \"Cannot wrap subplots when pairing on both `x` and `y`.\"\n\n collisions = {\"x\": [\"columns\", \"rows\"], \"y\": [\"rows\", \"columns\"]}\n for pair_axis, (multi_dim, wrap_dim) in collisions.items():\n if pair_axis not in pair_spec.get(\"structure\", {}):\n continue\n elif multi_dim[:3] in facet_vars:\n err = f\"Cannot facet the {multi_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and facet_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and pair_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {multi_dim} while faceting the {wrap_dim}.\"\n\n if err is not None:\n raise RuntimeError(err) # TODO what err class? 
Define PlotSpecError?\n\n def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n 
else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 77, "name": "_determine_grid_dimensions", "kind": "def", "category": "function", "info": " def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) 
-> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = 
{\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 101, "name": "_handle_wrapping", "kind": "def", "category": "function", "info": " def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], 
self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 120, "name": "_determine_axis_sharing", "kind": "def", "category": "function", "info": " def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update 
subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if 
not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 141, "name": "init_figure", "kind": "def", "category": "function", "info": " def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) 
== self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_decorators.py", "rel_fname": "seaborn/_decorators.py", "line": 3, "name": "share_init_params_with_map", "kind": "def", "category": "function", "info": "def share_init_params_with_map(cls):\n \"\"\"Make cls.map a classmethod with same signature as cls.__init__.\"\"\"\n map_sig = signature(cls.map)\n init_sig = signature(cls.__init__)\n\n new = [v for k, v in init_sig.parameters.items() if k != \"self\"]\n new.insert(0, map_sig.parameters[\"cls\"])\n cls.map.__signature__ = map_sig.replace(parameters=new)\n cls.map.__doc__ = cls.__init__.__doc__\n\n cls.map = classmethod(cls.map)\n\n return cls\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 5, "name": "DocstringComponents", "kind": "def", "category": "class", "info": "__init__\t__getattr__\tfrom_nested_components\tfrom_function_params"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 18, "name": "group", "kind": "ref", "category": "function", "info": " entries[key] = m.group(1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 24, "name": "__getattr__", "kind": "def", "category": "function", "info": " def __getattr__(self, attr):\n \"\"\"Provide dot access to entries for clean raw docstrings.\"\"\"\n if attr in self.entries:\n return self.entries[attr]\n else:\n try:\n return self.__getattribute__(attr)\n except AttributeError as err:\n # If Python is run with -OO, it will strip docstrings and our lookup\n # from self.entries will fail. 
We check for __debug__, which is actually\n # set to False by -O (it is True for normal execution).\n # But we only want to see an error when building the docs;\n # not something users should see, so this slight inconsistency is fine.\n if __debug__:\n raise err\n else:\n pass\n\n @classmethod\n def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 43, "name": "from_nested_components", "kind": "def", "category": "function", "info": " def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 45, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(kwargs, strip_whitespace=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 48, "name": "from_function_params", "kind": "def", "category": "function", "info": " def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 50, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 58, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(comp_dict)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 194, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " params=DocstringComponents(_core_params),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 195, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " returns=DocstringComponents(_core_returns),\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 196, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " seealso=DocstringComponents(_seealso_blurbs),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 20, "name": "AreaBase", "kind": "def", "category": "class", "info": "_plot\t_standardize_coordinate_parameters\t_postprocess_artist\t_get_verts\t_legend_artist"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 22, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n patches = defaultdict(list)\n\n for keys, data, ax in split_gen():\n\n kws = {}\n data = self._standardize_coordinate_parameters(data, orient)\n resolved = resolve_properties(self, keys, scales)\n verts = self._get_verts(data, orient)\n ax.update_datalim(verts)\n\n # TODO should really move this logic into resolve_color\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n kws[\"facecolor\"] = fc\n kws[\"edgecolor\"] = resolve_color(self, keys, \"edge\", scales)\n kws[\"linewidth\"] = resolved[\"edgewidth\"]\n kws[\"linestyle\"] = resolved[\"edgestyle\"]\n\n patches[ax].append(mpl.patches.Polygon(verts, **kws))\n\n for ax, ax_patches in patches.items():\n\n for patch in ax_patches:\n self._postprocess_artist(patch, ax, orient)\n ax.add_patch(patch)\n\n def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 26, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 29, "name": "_standardize_coordinate_parameters", "kind": "ref", "category": "function", "info": " data = self._standardize_coordinate_parameters(data, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 30, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 31, "name": 
"_get_verts", "kind": "ref", "category": "function", "info": " verts = self._get_verts(data, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 32, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(verts)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 35, "name": "resolve_color", "kind": "ref", "category": "function", "info": " fc = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 37, "name": "to_rgba", "kind": "ref", "category": "function", "info": " fc = mpl.colors.to_rgba(fc, 0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 40, "name": "resolve_color", "kind": "ref", "category": "function", "info": " kws[\"edgecolor\"] = resolve_color(self, keys, \"edge\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 44, "name": "Polygon", "kind": "ref", "category": "function", "info": " patches[ax].append(mpl.patches.Polygon(verts, **kws))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 49, "name": "_postprocess_artist", "kind": "ref", "category": "function", "info": " self._postprocess_artist(patch, ax, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 50, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(patch)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 52, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 55, "name": "_postprocess_artist", "kind": "def", "category": "function", "info": " def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, 
f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 58, "name": "_get_verts", "kind": "def", "category": "function", "info": " def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 61, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 63, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}min\"]].to_numpy(),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 64, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 70, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 73, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 75, "name": "resolve_color", "kind": "ref", "category": "function", "info": " 
fc = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 77, "name": "to_rgba", "kind": "ref", "category": "function", "info": " fc = mpl.colors.to_rgba(fc, 0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 79, "name": "Patch", "kind": "ref", "category": "function", "info": " return mpl.patches.Patch(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 81, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edgecolor=resolve_color(self, keys, \"edge\", scales),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 90, "name": "Area", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters\t_postprocess_artist"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 103, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 104, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 105, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 106, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 107, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 108, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 109, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 112, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 114, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, 
orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 116, "name": "rename", "kind": "ref", "category": "function", "info": " return data.rename(columns={\"baseline\": f\"{dv}min\", dv: f\"{dv}max\"})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 118, "name": "_postprocess_artist", "kind": "def", "category": "function", "info": " def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 123, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " artist.set_linewidth(artist.get_linewidth() * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 123, "name": "get_linewidth", "kind": "ref", "category": "function", "info": " artist.set_linewidth(artist.get_linewidth() * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 125, "name": "get_linestyle", "kind": "ref", "category": "function", "info": " linestyle = artist.get_linestyle()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 128, "name": "set_linestyle", "kind": "ref", "category": "function", "info": " artist.set_linestyle(linestyle)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "get_path", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "get_transform", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 132, "name": "set_clip_box", "kind": "ref", "category": "function", "info": " artist.set_clip_box(ax.bbox)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 140, "name": "Band", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 153, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 154, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 155, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 156, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 157, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 158, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(0, )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 159, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableFloat = Mappable(\"-\", )\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 161, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient)\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n 
verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 27, "name": "BarBase", "kind": "def", "category": "class", "info": "_make_patches\t_resolve_properties\t_legend_artist"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 29, "name": "_make_patches", "kind": "def", "category": "function", "info": " def _make_patches(self, data, scales, orient):\n\n kws = self._resolve_properties(data, scales)\n if orient == \"x\":\n kws[\"x\"] = (data[\"x\"] - data[\"width\"] / 2).to_numpy()\n kws[\"y\"] = data[\"baseline\"].to_numpy()\n kws[\"w\"] = data[\"width\"].to_numpy()\n kws[\"h\"] = (data[\"y\"] - data[\"baseline\"]).to_numpy()\n else:\n kws[\"x\"] = data[\"baseline\"].to_numpy()\n kws[\"y\"] = (data[\"y\"] - data[\"width\"] / 2).to_numpy()\n kws[\"w\"] = (data[\"x\"] - data[\"baseline\"]).to_numpy()\n kws[\"h\"] = data[\"width\"].to_numpy()\n\n kws.pop(\"width\", None)\n kws.pop(\"baseline\", None)\n\n val_dim = {\"x\": \"h\", \"y\": \"w\"}[orient]\n bars, vals = [], []\n\n for i in range(len(data)):\n\n row = {k: v[i] for k, v in kws.items()}\n\n # Skip bars with no value. It's possible we'll want to make this\n # an option (i.e. so you have an artist for animating or annotating),\n # but let's keep things simple for now.\n if not np.nan_to_num(row[val_dim]):\n continue\n\n bar = mpl.patches.Rectangle(\n xy=(row[\"x\"], row[\"y\"]),\n width=row[\"w\"],\n height=row[\"h\"],\n facecolor=row[\"facecolor\"],\n edgecolor=row[\"edgecolor\"],\n linestyle=row[\"edgestyle\"],\n linewidth=row[\"edgewidth\"],\n **self.artist_kws,\n )\n bars.append(bar)\n vals.append(row[val_dim])\n\n return bars, vals\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n\n resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 31, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " kws = self._resolve_properties(data, scales)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 33, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"x\"] = (data[\"x\"] - data[\"width\"] / 2).to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 34, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"y\"] = data[\"baseline\"].to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 35, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"w\"] = data[\"width\"].to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 36, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"h\"] = (data[\"y\"] - data[\"baseline\"]).to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 38, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"x\"] = data[\"baseline\"].to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 39, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"y\"] = (data[\"y\"] - data[\"width\"] / 2).to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 40, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"w\"] = (data[\"x\"] - data[\"baseline\"]).to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 41, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"h\"] = data[\"width\"].to_numpy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 59, "name": "Rectangle", "kind": "ref", "category": "function", "info": " bar = mpl.patches.Rectangle(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 74, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n\n resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 76, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 78, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 79, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 90, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 95, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " key = self._resolve_properties(key, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 96, "name": "Patch", "kind": "ref", "category": "function", "info": " artist = mpl.patches.Patch(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 107, "name": "Bar", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 120, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 121, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 122, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 123, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 124, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, 
grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 125, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 126, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 129, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(.8, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 130, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this mappable?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 132, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n val_idx = [\"y\", \"x\"].index(orient)\n\n for _, data, ax in split_gen():\n\n bars, vals = self._make_patches(data, scales, orient)\n\n for bar in bars:\n\n # Because we are clipping the artist (see below), the edges end up\n # looking half as wide as they actually are. I don't love this clumsy\n # workaround, which is going to cause surprises if you work with the\n # artists directly. We may need to revisit after feedback.\n bar.set_linewidth(bar.get_linewidth() * 2)\n linestyle = bar.get_linestyle()\n if linestyle[1]:\n linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))\n bar.set_linestyle(linestyle)\n\n # This is a bit of a hack to handle the fact that the edge lines are\n # centered on the actual extents of the bar, and overlap when bars are\n # stacked or dodged. We may discover that this causes problems and needs\n # to be revisited at some point. Also it should be faster to clip with\n # a bbox than a path, but I cant't work out how to get the intersection\n # with the axes bbox.\n bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n if self.artist_kws.get(\"clip_on\", True):\n # It seems the above hack undoes the default axes clipping\n bar.set_clip_box(ax.bbox)\n bar.sticky_edges[val_idx][:] = (0, np.inf)\n ax.add_patch(bar)\n\n # Add a container which is useful for, e.g. 
Axes.bar_label\n if Version(mpl.__version__) >= Version(\"3.4.0\"):\n orientation = {\"x\": \"vertical\", \"y\": \"horizontal\"}[orient]\n container_kws = dict(datavalues=vals, orientation=orientation)\n else:\n container_kws = {}\n container = mpl.container.BarContainer(bars, **container_kws)\n ax.add_container(container)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 136, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 138, "name": "_make_patches", "kind": "ref", "category": "function", "info": " bars, vals = self._make_patches(data, scales, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 146, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " bar.set_linewidth(bar.get_linewidth() * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 146, "name": "get_linewidth", "kind": "ref", "category": "function", "info": " bar.set_linewidth(bar.get_linewidth() * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 147, "name": "get_linestyle", "kind": "ref", "category": "function", "info": " linestyle = bar.get_linestyle()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 150, "name": "set_linestyle", "kind": "ref", "category": "function", "info": " bar.set_linestyle(linestyle)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 158, "name": "get_path", "kind": "ref", "category": "function", "info": " bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 163, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(bar)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 166, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.4.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 166, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.4.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 171, "name": "BarContainer", "kind": "ref", "category": "function", "info": " container = mpl.container.BarContainer(bars, **container_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 172, "name": "add_container", "kind": "ref", "category": "function", "info": " ax.add_container(container)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 177, "name": "Bars", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 190, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 191, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 192, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 193, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(rc=\"patch.edgecolor\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 194, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 195, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(auto=True, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 196, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 199, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 200, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this mappable?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 202, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n val_idx = [\"y\", \"x\"].index(orient)\n\n for _, data, ax in split_gen():\n\n bars, vals = self._make_patches(data, scales, orient)\n\n for bar in bars:\n\n # Because we are clipping the artist (see below), the edges end up\n # looking half as wide as they actually are. I don't love this clumsy\n # workaround, which is going to cause surprises if you work with the\n # artists directly. 
We may need to revisit after feedback.\n bar.set_linewidth(bar.get_linewidth() * 2)\n linestyle = bar.get_linestyle()\n if linestyle[1]:\n linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))\n bar.set_linestyle(linestyle)\n\n # This is a bit of a hack to handle the fact that the edge lines are\n # centered on the actual extents of the bar, and overlap when bars are\n # stacked or dodged. We may discover that this causes problems and needs\n # to be revisited at some point. Also it should be faster to clip with\n # a bbox than a path, but I can't work out how to get the intersection\n # with the axes bbox.\n bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n if self.artist_kws.get(\"clip_on\", True):\n # It seems the above hack undoes the default axes clipping\n bar.set_clip_box(ax.bbox)\n bar.sticky_edges[val_idx][:] = (0, np.inf)\n ax.add_patch(bar)\n\n # Add a container which is useful for, e.g. Axes.bar_label\n if Version(mpl.__version__) >= Version(\"3.4.0\"):\n orientation = {\"x\": \"vertical\", \"y\": \"horizontal\"}[orient]\n container_kws = dict(datavalues=vals, orientation=orientation)\n else:\n container_kws = {}\n container = mpl.container.BarContainer(bars, **container_kws)\n ax.add_container(container)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 208, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 209, "name": "_make_patches", "kind": "ref", "category": "function", "info": " bars, _ = self._make_patches(data, scales, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 215, "name": "PatchCollection", "kind": "ref", "category": "function", "info": " col = mpl.collections.PatchCollection(ax_patches, match_original=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 217, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(col, autolim=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 223, "name": "get_paths", "kind": "ref", "category": "function", "info": " xys = np.vstack([path.vertices for path in col.get_paths()])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 224, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xys)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 229, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 231, "name": "get_dimensions", "kind": "def", "category": "function", "info": " def get_dimensions(collection):\n edges, widths = [], []\n for verts in (path.vertices for path in collection.get_paths()):\n edges.append(min(verts[:, ori_idx]))\n widths.append(np.ptp(verts[:, ori_idx]))\n return 
np.array(edges), np.array(widths)\n\n min_width = np.inf\n for ax, col in collections.items():\n edges, widths = get_dimensions(col)\n points = 72 / ax.figure.dpi * abs(\n ax.transData.transform([edges + widths] * 2)\n - ax.transData.transform([edges] * 2)\n )\n min_width = min(min_width, min(points[:, ori_idx]))\n\n linewidth = min(.1 * min_width, mpl.rcParams[\"patch.linewidth\"])\n for _, col in collections.items():\n col.set_linewidth(linewidth)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 233, "name": "get_paths", "kind": "ref", "category": "function", "info": " for verts in (path.vertices for path in collection.get_paths()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 240, "name": "get_dimensions", "kind": "ref", "category": "function", "info": " edges, widths = get_dimensions(col)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 242, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([edges + widths] * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 243, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([edges] * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 249, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " col.set_linewidth(linewidth)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 24, "name": "Mappable", "kind": "def", "category": "class", "info": "__init__\t__repr__\tdepend\tgrouping\tdefault"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 76, "name": "depend", "kind": "def", "category": "function", "info": " def depend(self) -> Any:\n \"\"\"Return the name of the feature to source a default value from.\"\"\"\n return self._depend\n\n @property\n def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 81, "name": "grouping", "kind": "def", "category": "function", "info": " def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 85, "name": "default", "kind": "def", "category": "function", "info": " def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 102, "name": "Mark", "kind": "def", "category": "class", "info": "_mappable_props\t_grouping_props\t_resolve\t_infer_orient\t_plot\t_legend_artist"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 108, "name": "_mappable_props", "kind": "def", "category": "function", "info": " def _mappable_props(self):\n return {\n f.name: getattr(self, f.name) for f in fields(self)\n if isinstance(f.default, Mappable)\n }\n\n @property\n def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would extender every need to call directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n feature = scales[name](data[name])\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. 
Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 115, "name": "_grouping_props", "kind": "def", "category": "function", "info": " def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would an extender ever need to call directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n feature = scales[name](data[name])\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. 
set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 124, "name": "_resolve", "kind": "def", "category": "function", "info": " def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n feature = scales[name](data[name])\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. 
set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 150, "name": "Property", "kind": "ref", "category": "function", "info": " prop = PROPERTIES.get(name, Property(name))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 162, "name": "standardize", "kind": "ref", "category": "function", "info": " feature = prop.standardize(feature)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 182, "name": "_resolve", "kind": "ref", "category": "function", "info": " return self._resolve(data, feature.depend, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 184, "name": "standardize", "kind": "ref", "category": "function", "info": " default = prop.standardize(feature.default)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 191, "name": "_infer_orient", "kind": "def", "category": "function", "info": " def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. 
Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 207, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 216, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 223, "name": "resolve_properties", "kind": "def", "category": "function", "info": "def resolve_properties(\n mark: Mark, data: DataFrame, scales: dict[str, Scale]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 228, "name": "_resolve", "kind": "ref", "category": "function", "info": " name: mark._resolve(data, name, scales) for name in mark._mappable_props\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 233, "name": "resolve_color", "kind": "def", "category": "function", "info": "def resolve_color(\n mark: Mark,\n data: DataFrame | dict,\n prefix: str = \"\",\n scales: dict[str, Scale] | None = None,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 259, "name": "_resolve", "kind": "ref", "category": "function", "info": " color = mark._resolve(data, f\"{prefix}color\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 262, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, f\"{prefix}alpha\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 264, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, \"alpha\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 266, "name": "visible", "kind": "def", "category": "function", "info": " def 
visible(x, axis=None):\n \"\"\"Detect \"invisible\" colors to set alpha appropriately.\"\"\"\n # TODO First clause only needed to handle non-rgba arrays,\n # which we are trying to handle upstream\n return np.array(x).dtype.kind != \"f\" or np.isfinite(x).all(axis)\n\n # Second check here catches vectors of strings with identity scale\n # It could probably be handled better upstream. This is a tricky problem\n if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):\n if len(color) == 4:\n return mpl.colors.to_rgba(color)\n alpha = alpha if visible(color) else np.nan\n return mpl.colors.to_rgba(color, alpha)\n else:\n if np.ndim(color) == 2 and color.shape[1] == 4:\n return mpl.colors.to_rgba_array(color)\n alpha = np.where(visible(color, axis=1), alpha, np.nan)\n return mpl.colors.to_rgba_array(color, alpha)\n\n # TODO should we be implementing fill here too?\n # (i.e. set fillalpha to 0 when fill=False)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 276, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 277, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = alpha if visible(color) else np.nan\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 278, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color, alpha)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 281, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 282, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = np.where(visible(color, axis=1), alpha, np.nan)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 283, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color, alpha)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 289, "name": "document_properties", "kind": "def", "category": "function", "info": "def document_properties(mark):\n\n properties = [f.name for f in fields(mark) if isinstance(f.default, Mappable)]\n text = [\n \"\",\n \" This mark defines the following properties:\",\n textwrap.fill(\n \", \".join([f\"|{p}|\" for p in properties]),\n width=78, initial_indent=\" \" * 8, subsequent_indent=\" \" * 8,\n ),\n ]\n\n docstring_lines = mark.__doc__.split(\"\\n\")\n new_docstring = \"\\n\".join([\n *docstring_lines[:2],\n *text,\n *docstring_lines[2:],\n ])\n mark.__doc__ = new_docstring\n return mark\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 26, "name": "DotBase", "kind": "def", "category": "class", "info": "_resolve_paths\t_resolve_properties\t_plot\t_legend_artist"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 28, "name": "_resolve_paths", "kind": "def", "category": "function", "info": " def _resolve_paths(self, data):\n\n paths = []\n path_cache = {}\n marker = data[\"marker\"]\n\n def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 34, "name": "get_transformed_path", "kind": "def", "category": "function", "info": " def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of 
xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 35, "name": "get_path", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 35, "name": "transformed", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 38, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " return get_transformed_path(marker)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 42, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " path_cache[m] = get_transformed_path(m)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 46, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n 
def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 48, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 49, "name": "_resolve_paths", "kind": "ref", "category": "function", "info": " resolved[\"path\"] = self._resolve_paths(resolved)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 53, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = resolved[\"marker\"].is_filled()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 55, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 61, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 67, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 70, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " data = 
self._resolve_properties(data, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 72, "name": "PathCollection", "kind": "ref", "category": "function", "info": " points = mpl.collections.PathCollection(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 81, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 84, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(points)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 86, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 91, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " res = self._resolve_properties(key, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 93, "name": "PathCollection", "kind": "ref", "category": "function", "info": " return mpl.collections.PathCollection(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 100, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 107, "name": "Dot", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 120, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(\"o\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 121, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(6, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 122, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 
123, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 124, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 125, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 126, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 127, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(depend=\"alpha\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 128, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(.5, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 129, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 131, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n 
facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 133, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 140, "name": "resolve_color", "kind": "ref", "category": "function", "info": " main_color = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 141, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edge_color = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 161, "name": "Dots", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 175, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"scatter.marker\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 176, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(4, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 177, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 178, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 179, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False) # TODO auto alpha?\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 180, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 181, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 182, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillalpha: MappableFloat = Mappable(.2, grouping=False)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 184, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 186, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 188, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 189, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 36, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 38, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = 
Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 39, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 40, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"lines.marker\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 41, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(rc=\"lines.markersize\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 42, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 43, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 44, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"lines.markeredgewidth\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 48, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n if self._sort:\n data = data.sort_values(orient)\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n 
self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 50, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 52, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 53, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 54, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 55, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 58, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 58, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 62, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 65, "name": "_handle_capstyle", "kind": "ref", "category": "function", "info": " self._handle_capstyle(artist_kws, vals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 67, "name": "Line2D", "kind": "ref", "category": "function", "info": " line = mpl.lines.Line2D(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 68, "name": 
"to_numpy", "kind": "ref", "category": "function", "info": " data[\"x\"].to_numpy(),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 69, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"y\"].to_numpy(),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 80, "name": "add_line", "kind": "ref", "category": "function", "info": " ax.add_line(line)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 82, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 85, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 86, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 87, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 88, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 91, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": 
"seaborn/_marks/line.py", "line": 91, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 95, "name": "_handle_capstyle", "kind": "ref", "category": "function", "info": " self._handle_capstyle(artist_kws, vals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 97, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 110, "name": "_handle_capstyle", "kind": "def", "category": "function", "info": " def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 121, "name": "Line", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 140, "name": "Paths", "kind": "def", "category": "class", "info": "__post_init__\t_setup_lines\t_plot\t_legend_artist"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 153, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 154, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 155, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 156, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 160, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n # LineCollection artists have a capstyle property but don't source its value\n # from the rc, so we do that manually here. Unfortunately, because we add\n # only one LineCollection, we have the use the same capstyle for all lines\n # even when they are dashed. 
It's a slight inconsistency, but looks fine IMO.\n self.artist_kws.setdefault(\"capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n\n def _setup_lines(self, split_gen, scales, orient):\n\n line_data = {}\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n if ax not in line_data:\n line_data[ax] = {\n \"segments\": [],\n \"colors\": [],\n \"linewidths\": [],\n \"linestyles\": [],\n }\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n\n if self._sort:\n data = data.sort_values(orient)\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n line_data[ax][\"segments\"].append(xy)\n line_data[ax][\"colors\"].append(vals[\"color\"])\n line_data[ax][\"linewidths\"].append(vals[\"linewidth\"])\n line_data[ax][\"linestyles\"].append(vals[\"linestyle\"])\n\n return line_data\n\n def _plot(self, split_gen, scales, orient):\n\n line_data = self._setup_lines(split_gen, scales, orient)\n\n for ax, ax_data in line_data.items():\n lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n # Handle datalim update manually\n # https://github.com/matplotlib/matplotlib/issues/23129\n ax.add_collection(lines, autolim=False)\n xy = np.concatenate(ax_data[\"segments\"])\n ax.update_datalim(xy)\n\n def _legend_artist(self, variables, value, scales):\n\n key = resolve_properties(self, {v: value for v in variables}, scales)\n\n artist_kws = self.artist_kws.copy()\n capstyle = artist_kws.pop(\"capstyle\")\n artist_kws[\"solid_capstyle\"] = capstyle\n artist_kws[\"dash_capstyle\"] = capstyle\n\n return mpl.lines.Line2D(\n [], [],\n color=key[\"color\"],\n linewidth=key[\"linewidth\"],\n linestyle=key[\"linestyle\"],\n **artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 168, "name": "_setup_lines", "kind": "def", "category": "function", "info": " def _setup_lines(self, split_gen, scales, orient):\n\n line_data = {}\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n if ax not in line_data:\n line_data[ax] = {\n \"segments\": [],\n \"colors\": [],\n \"linewidths\": [],\n \"linestyles\": [],\n }\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n\n if self._sort:\n data = data.sort_values(orient)\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n line_data[ax][\"segments\"].append(xy)\n line_data[ax][\"colors\"].append(vals[\"color\"])\n line_data[ax][\"linewidths\"].append(vals[\"linewidth\"])\n line_data[ax][\"linestyles\"].append(vals[\"linestyle\"])\n\n return line_data\n\n def _plot(self, split_gen, scales, orient):\n\n line_data = self._setup_lines(split_gen, scales, orient)\n\n for ax, ax_data in line_data.items():\n lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n # Handle datalim update manually\n # https://github.com/matplotlib/matplotlib/issues/23129\n ax.add_collection(lines, autolim=False)\n xy = np.concatenate(ax_data[\"segments\"])\n ax.update_datalim(xy)\n\n def _legend_artist(self, variables, value, scales):\n\n key = resolve_properties(self, {v: value for v in variables}, scales)\n\n artist_kws = self.artist_kws.copy()\n capstyle = artist_kws.pop(\"capstyle\")\n artist_kws[\"solid_capstyle\"] = capstyle\n artist_kws[\"dash_capstyle\"] = capstyle\n\n return mpl.lines.Line2D(\n [], [],\n color=key[\"color\"],\n 
linewidth=key[\"linewidth\"],\n linestyle=key[\"linestyle\"],\n **artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 172, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 182, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 183, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 186, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 197, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n if self._sort:\n data = data.sort_values(orient)\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if 
vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 199, "name": "_setup_lines", "kind": "ref", "category": "function", "info": " line_data = self._setup_lines(split_gen, scales, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 202, "name": "LineCollection", "kind": "ref", "category": "function", "info": " lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 205, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines, autolim=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 207, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xy)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 209, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 211, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " key = resolve_properties(self, {v: value for v in variables}, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 218, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 229, "name": "Lines", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": 
"seaborn/_marks/line.py", "line": 247, "name": "Range", "kind": "def", "category": "class", "info": "_setup_lines"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 256, "name": "_setup_lines", "kind": "def", "category": "function", "info": " def _setup_lines(self, split_gen, scales, orient):\n\n line_data = {}\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n if ax not in line_data:\n line_data[ax] = {\n \"segments\": [],\n \"colors\": [],\n \"linewidths\": [],\n \"linestyles\": [],\n }\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n\n if self._sort:\n data = data.sort_values(orient)\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n line_data[ax][\"segments\"].append(xy)\n line_data[ax][\"colors\"].append(vals[\"color\"])\n line_data[ax][\"linewidths\"].append(vals[\"linewidth\"])\n line_data[ax][\"linestyles\"].append(vals[\"linestyle\"])\n\n return line_data\n\n def _plot(self, split_gen, scales, orient):\n\n line_data = self._setup_lines(split_gen, scales, orient)\n\n for ax, ax_data in line_data.items():\n lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n # Handle datalim update manually\n # https://github.com/matplotlib/matplotlib/issues/23129\n ax.add_collection(lines, autolim=False)\n xy = np.concatenate(ax_data[\"segments\"])\n ax.update_datalim(xy)\n\n def _legend_artist(self, variables, value, scales):\n\n key = resolve_properties(self, {v: value for v in variables}, scales)\n\n artist_kws = self.artist_kws.copy()\n capstyle = artist_kws.pop(\"capstyle\")\n artist_kws[\"solid_capstyle\"] = capstyle\n artist_kws[\"dash_capstyle\"] = capstyle\n\n return mpl.lines.Line2D(\n [], [],\n color=key[\"color\"],\n linewidth=key[\"linewidth\"],\n linestyle=key[\"linestyle\"],\n **artist_kws,\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 262, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 272, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 273, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 276, "name": "melt", "kind": "ref", "category": "function", "info": " data = data[cols].melt(orient, value_name=other)[[\"x\", \"y\"]]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 277, "name": "to_numpy", "kind": "ref", "category": "function", "info": " segments = [d.to_numpy() for _, d in data.groupby(orient)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 277, "name": "groupby", "kind": "ref", "category": 
"function", "info": " segments = [d.to_numpy() for _, d in data.groupby(orient)]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 28, "name": "SemanticMapping", "kind": "def", "category": "class", "info": "__init__\tmap\t_check_list_length\t_lookup_single\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 54, "name": "cls", "kind": "ref", "category": "function", "info": " setattr(plotter, method_name, cls(plotter, *args, **kwargs))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 57, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels, values, variable):\n \"\"\"Input check when values are provided as a list.\"\"\"\n # Copied from _core/properties; eventually will be replaced for that.\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n if message:\n warnings.warn(message, UserWarning, stacklevel=6)\n\n return values\n\n def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 81, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 88, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return [self._lookup_single(k, *args, **kwargs) for k in key]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 90, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return self._lookup_single(key, *args, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 94, "name": "HueMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\tinfer_map_type\tcategorical_mapping\tnumeric_mapping"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 125, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 138, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, cmap = self.numeric_mapping(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 147, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 156, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 169, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 185, "name": "norm", "kind": "ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 192, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 194, "name": "cmap", "kind": "ref", "category": "function", "info": " value = self.cmap(normed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 197, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, palette, norm, input_format, var_type):\n \"\"\"Determine how to implement the mapping.\"\"\"\n if palette in QUAL_PALETTES:\n map_type = \"categorical\"\n elif norm is not None:\n map_type = \"numeric\"\n elif isinstance(palette, (dict, list)):\n map_type = \"categorical\"\n elif input_format == \"wide\":\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n 
if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n colors = self._check_list_length(levels, palette, \"palette\")\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 212, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n colors = self._check_list_length(levels, palette, \"palette\")\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our 
default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 216, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 233, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n_colors <= len(get_color_cycle()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 234, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(None, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 236, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 238, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, palette, \"palette\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 240, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 246, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = 
mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 254, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 260, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 271, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(palette, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 275, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 277, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 282, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 283, "name": "norm", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 283, "name": "dropna", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 285, "name": "cmap", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 285, "name": "norm", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 291, "name": "SizeMapping", "kind": "def", "category": "class", "info": "__init__\tinfer_map_type\t_lookup_single\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 312, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": 
"seaborn/_oldcore.py", "line": 320, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, size_range = self.numeric_mapping(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 328, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 338, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 352, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, norm, sizes, var_type):\n\n if norm is not None:\n map_type = \"numeric\"\n elif isinstance(sizes, (dict, list)):\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def _lookup_single(self, key):\n\n try:\n value = self.lookup_table[key]\n except KeyError:\n normed = self.norm(key)\n if np.ma.is_masked(normed):\n normed = np.nan\n value = self.size_range[0] + normed * np.ptp(self.size_range)\n return value\n\n def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n sizes = self._check_list_length(levels, sizes, \"sizes\")\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. 
This is because \"ordered\" categories\n # are often though to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process it\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 363, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 368, "name": "norm", "kind": 
"ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 369, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 374, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n sizes = self._check_list_length(levels, sizes, \"sizes\")\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. This is because \"ordered\" categories\n # are often though to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process it\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. 
See the note in the categorical\n                # method about how this is suboptimal for future development.\n                size_range = self.plotter._default_size_range\n\n        # Now that we know the minimum and maximum sizes that will get drawn,\n        # we need to map the data values that we have into that range. We will\n        # use a matplotlib Normalize class, which is typically used for numeric\n        # color mapping but works fine here too. It takes data values and maps\n        # them into a [0, 1] interval, potentially nonlinearly.\n\n        if norm is None:\n            # Default is a linear function between the min and max data values\n            norm = mpl.colors.Normalize()\n        elif isinstance(norm, tuple):\n            # It is also possible to give different limits in data space\n            norm = mpl.colors.Normalize(*norm)\n        elif not isinstance(norm, mpl.colors.Normalize):\n            err = f\"Value for size `norm` parameter not understood: {norm}\"\n            raise ValueError(err)\n        else:\n            # If provided with Normalize object, copy it so we can modify\n            norm = copy(norm)\n\n        # Set the mapping so all output values are in [0, 1]\n        norm.clip = True\n\n        # If the input range is not set, use the full range of the data\n        if not norm.scaled():\n            norm(levels)\n\n        # Map from data values to [0, 1] range\n        sizes_scaled = norm(levels)\n\n        # Now map from the scaled range into the artist units\n        if isinstance(sizes, dict):\n            lookup_table = sizes\n        else:\n            lo, hi = size_range\n            sizes = lo + sizes_scaled * (hi - lo)\n            lookup_table = dict(zip(levels, sizes))\n\n        return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 376, "name": "categorical_order", "kind": "ref", "category": "function", "info": "        levels = categorical_order(data, order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 390, "name": "_check_list_length", "kind": "ref", "category": "function", "info": "            sizes = self._check_list_length(levels, sizes, \"sizes\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 432, "name": "numeric_mapping", "kind": "def", "category": "function", "info": "    def numeric_mapping(self, data, sizes, norm):\n\n        if isinstance(sizes, dict):\n            # The presence of a norm object overrides a dictionary of sizes\n            # in specifying a numeric mapping, so we need to process the\n            # dictionary here\n            levels = list(np.sort(list(sizes)))\n            size_values = sizes.values()\n            size_range = min(size_values), max(size_values)\n\n        else:\n\n            # The levels here will be the unique values in the data\n            levels = list(np.sort(remove_na(data.unique())))\n\n            if isinstance(sizes, tuple):\n\n                # For numeric inputs, the size can be parametrized by\n                # the minimum and maximum artist values to map to. The\n                # norm object that gets set up next specifies how to\n                # do the mapping.\n\n                if len(sizes) != 2:\n                    err = \"A `sizes` tuple must have only 2 values\"\n                    raise ValueError(err)\n\n                size_range = sizes\n\n            elif sizes is not None:\n\n                err = f\"Value for `sizes` not understood: {sizes}\"\n                raise ValueError(err)\n\n            else:\n\n                # When not provided, we get the size range from the plotter\n                # object we are attached to. 
See the note in the categorical\n                # method about how this is suboptimal for future development.\n                size_range = self.plotter._default_size_range\n\n        # Now that we know the minimum and maximum sizes that will get drawn,\n        # we need to map the data values that we have into that range. We will\n        # use a matplotlib Normalize class, which is typically used for numeric\n        # color mapping but works fine here too. It takes data values and maps\n        # them into a [0, 1] interval, potentially nonlinearly.\n\n        if norm is None:\n            # Default is a linear function between the min and max data values\n            norm = mpl.colors.Normalize()\n        elif isinstance(norm, tuple):\n            # It is also possible to give different limits in data space\n            norm = mpl.colors.Normalize(*norm)\n        elif not isinstance(norm, mpl.colors.Normalize):\n            err = f\"Value for size `norm` parameter not understood: {norm}\"\n            raise ValueError(err)\n        else:\n            # If provided with Normalize object, copy it so we can modify\n            norm = copy(norm)\n\n        # Set the mapping so all output values are in [0, 1]\n        norm.clip = True\n\n        # If the input range is not set, use the full range of the data\n        if not norm.scaled():\n            norm(levels)\n\n        # Map from data values to [0, 1] range\n        sizes_scaled = norm(levels)\n\n        # Now map from the scaled range into the artist units\n        if isinstance(sizes, dict):\n            lookup_table = sizes\n        else:\n            lo, hi = size_range\n            sizes = lo + sizes_scaled * (hi - lo)\n            lookup_table = dict(zip(levels, sizes))\n\n        return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 445, "name": "remove_na", "kind": "ref", "category": "function", "info": "            levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 480, "name": "Normalize", "kind": "ref", "category": "function", "info": "            norm = mpl.colors.Normalize()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 483, "name": "Normalize", "kind": "ref", "category": "function", "info": "            norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 495, "name": "scaled", "kind": "ref", "category": "function", "info": "        if not norm.scaled():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 496, "name": "norm", "kind": "ref", "category": "function", "info": "            norm(levels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 499, "name": "norm", "kind": "ref", "category": "function", "info": "        sizes_scaled = norm(levels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 513, "name": "StyleMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\t_map_attributes"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 536, "name": "variable_type", "kind": "ref", "category": "function", "info": "        if variable_type(data) == \"datetime\":\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 540, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 542, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " markers = self._map_attributes(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 543, "name": "unique_markers", "kind": "ref", "category": "function", "info": " markers, levels, unique_markers(len(levels)), \"markers\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 545, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " dashes = self._map_attributes(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 546, "name": "unique_dashes", "kind": "ref", "category": "function", "info": " dashes, levels, unique_dashes(len(levels)), \"dashes\",\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 554, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 555, "name": "get_path", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 555, "name": "transformed", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 555, "name": "get_transform", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 556, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_markers.append(m.is_filled())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 579, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key, attr=None):\n \"\"\"Get attribute(s) for a given data point.\"\"\"\n if attr is None:\n value = self.lookup_table[key]\n else:\n value = self.lookup_table[key][attr]\n return value\n\n def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n arg = self._check_list_length(levels, arg, attr)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err 
= f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 587, "name": "_map_attributes", "kind": "def", "category": "function", "info": " def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n arg = self._check_list_length(levels, arg, attr)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err = f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 598, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " arg = self._check_list_length(levels, arg, attr)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 612, "name": "VectorPlotter", "kind": "def", "category": "class", "info": "__init__\tget_semantics\thas_xy_data\tvar_levels\tassign_variables\t_assign_variables_wideform\t_assign_variables_longform\titer_data\tcomp_data\t_get_axes\t_attach\t_log_scaled\t_add_axis_labels\tscale_native\tscale_numeric\tscale_datetime\tscale_categorical"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 639, "name": "assign_variables", "kind": "ref", "category": "function", "info": " self.assign_variables(data, variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 651, "name": "get_semantics", "kind": "def", "category": "function", "info": " def get_semantics(cls, kwargs, semantics=None):\n \"\"\"Subset a dictionary arguments with known semantic variables.\"\"\"\n # TODO this should be get_variables since we have included x and y\n if semantics is None:\n semantics = cls.semantics\n variables = {}\n for key, val in kwargs.items():\n if key in semantics and val is not None:\n variables[key] = val\n return variables\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variables levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 663, "name": "has_xy_data", "kind": "def", "category": "function", "info": " def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variables levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 668, "name": "var_levels", "kind": "def", "category": "function", "info": " def var_levels(self):\n \"\"\"Property interface to ordered list of variables levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n            in any format that can construct a :class:`pandas.DataFrame` or\n            names of columns or index levels in ``data``.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        Raises\n        ------\n        ValueError\n            When variables are strings that don't appear in ``data``.\n\n        \"\"\"\n        plot_data = {}\n        variables = {}\n\n        # Data is optional; all variables can be defined as vectors\n        if data is None:\n            data = {}\n\n        # TODO should we try a data.to_dict() or similar here to more\n        # generally accept objects with that interface?\n        # Note that dict(df) also works for pandas, and gives us what we\n        # want, whereas DataFrame.to_dict() gives a nested dict instead of\n        # a dict of series.\n\n        # Variables can also be extracted from the index attribute\n        # TODO is this the most general way to enable it?\n        # There is no index.to_dict on multiindex, unfortunately\n        try:\n            index = data.index.to_frame()\n        except AttributeError:\n            index = {}\n\n        # The caller will determine the order of variables in plot_data\n        for key, val in kwargs.items():\n\n            # First try to treat the argument as a key for the data collection.\n            # But be flexible about what can be used as a key.\n            # Usually it will be a string, but allow numbers or tuples too when\n            # taking from the main data object. Only allow strings to reference\n            # fields in the index, because otherwise there is too much ambiguity.\n            try:\n                val_as_data_key = (\n                    val in data\n                    or (isinstance(val, (str, bytes)) and val in index)\n                )\n            except (KeyError, TypeError):\n                val_as_data_key = False\n\n            if val_as_data_key:\n\n                # We know that __getitem__ will work\n\n                if val in data:\n                    plot_data[key] = data[val]\n                elif val in index:\n                    plot_data[key] = index[val]\n                variables[key] = val\n\n            elif isinstance(val, (str, bytes)):\n\n                # This looks like a column name but we don't know what it means!\n\n                err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n                raise ValueError(err)\n\n            else:\n\n                # Otherwise, assume the value is itself data\n\n                # Raise when a data object is present and a vector can't be matched\n                if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n                    if np.ndim(val) and len(data) != len(val):\n                        val_cls = val.__class__.__name__\n                        err = (\n                            f\"Length of {val_cls} vectors must match length of `data`\"\n                            f\" when both are used, but `data` has length {len(data)}\"\n                            f\" and the vector passed to `{key}` has length {len(val)}.\"\n                        )\n                        raise ValueError(err)\n\n                plot_data[key] = val\n\n                # Try to infer the name of the variable\n                variables[key] = getattr(val, \"name\", None)\n\n        # Construct a tidy plot DataFrame. 
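The key-resolution loop quoted above can be condensed into a small, self-contained sketch (pandas only; `df`, the helper loop, and the variable names are illustrative and not part of the quoted module): string values are looked up as columns of the data object, while anything else is taken as the data itself and its name inferred when possible.

import pandas as pd

df = pd.DataFrame({"day": ["Mon", "Tue"], "value": [1.0, 2.0]})
kwargs = {"x": "day", "y": df["value"] * 10}  # a column name and a vector

plot_data, variables = {}, {}
for key, val in kwargs.items():
    if isinstance(val, str) and val in df:
        plot_data[key] = df[val]                      # resolve name -> column
        variables[key] = val
    else:
        plot_data[key] = val                          # use the vector directly
        variables[key] = getattr(val, "name", None)   # infer a name if present

plot_data = pd.DataFrame(plot_data)  # pandas aligns Series on their index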
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n                    # add (before 0.12), which we may decide to break, in which\n                    # case this option could be removed\n                    data_subset = data.loc[[]]\n\n                if data_subset.empty and not allow_empty:\n                    continue\n\n                sub_vars = dict(zip(grouping_vars, key))\n\n                yield sub_vars, data_subset.copy()\n\n        else:\n\n            yield {}, data.copy()\n\n    @property\n    def comp_data(self):\n        \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n        if not hasattr(self, \"ax\"):\n            # Probably a good idea, but will need a bunch of tests updated\n            # Most of these tests should just use the external interface\n            # Then this can be re-enabled.\n            # raise AttributeError(\"No Axes attached to plotter\")\n            return self.plot_data\n\n        if not hasattr(self, \"_comp_data\"):\n\n            comp_data = (\n                self.plot_data\n                .copy(deep=False)\n                .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n            )\n\n            for var in \"yx\":\n                if var not in self.variables:\n                    continue\n\n                parts = []\n                grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n                for converter, orig in grouped:\n                    with pd.option_context('mode.use_inf_as_null', True):\n                        orig = orig.dropna()\n                        if var in self.var_levels:\n                            # TODO this should happen in some centralized location\n                            # it is similar to GH2419, but more complicated because\n                            # supporting `order` in categorical plots is tricky\n                            orig = orig[orig.isin(self.var_levels[var])]\n                    comp = pd.to_numeric(converter.convert_units(orig))\n                    if converter.get_scale() == \"log\":\n                        comp = np.log10(comp)\n                    parts.append(pd.Series(comp, orig.index, name=orig.name))\n                if parts:\n                    comp_col = pd.concat(parts)\n                else:\n                    comp_col = pd.Series(dtype=float, name=var)\n                comp_data.insert(0, var, comp_col)\n\n            self._comp_data = comp_data\n\n        return self._comp_data\n\n    def _get_axes(self, sub_vars):\n        \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n        row = sub_vars.get(\"row\", None)\n        col = sub_vars.get(\"col\", None)\n        if row is not None and col is not None:\n            return self.facets.axes_dict[(row, col)]\n        elif row is not None:\n            return self.facets.axes_dict[row]\n        elif col is not None:\n            return self.facets.axes_dict[col]\n        elif self.ax is None:\n            return self.facets.ax\n        else:\n            return self.ax\n\n    def _attach(\n        self,\n        obj,\n        allowed_types=None,\n        log_scale=None,\n    ):\n        \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n        Parameters\n        ----------\n        obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`\n            Structural object that we will eventually plot onto.\n        allowed_types : str or list of str\n            If provided, raise when either the x or y variable does not have\n            one of the declared seaborn types.\n        log_scale : bool, number, or pair of bools or numbers\n            If not False, set the axes to use log scaling, with the given\n            base or defaulting to 10. 
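As a point of reference for the `convert_units` call in `comp_data` above, a minimal sketch (matplotlib and pandas only; the data are illustrative) of how a seeded categorical axis maps string levels to the fixed numeric positions that end up in the computed frame:

import matplotlib.pyplot as plt
import pandas as pd

fig, ax = plt.subplots()
orig = pd.Series(["a", "b", "c"], name="x")
ax.scatter(orig, [1, 2, 3])           # seeds the string-category converter
comp = pd.to_numeric(ax.xaxis.convert_units(orig))
print(list(comp))                     # [0.0, 1.0, 2.0]: fixed category positions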
If a tuple, interpreted as separate\n            arguments for the x and y axes.\n\n        \"\"\"\n        from .axisgrid import FacetGrid\n        if isinstance(obj, FacetGrid):\n            self.ax = None\n            self.facets = obj\n            ax_list = obj.axes.flatten()\n            if obj.col_names is not None:\n                self.var_levels[\"col\"] = obj.col_names\n            if obj.row_names is not None:\n                self.var_levels[\"row\"] = obj.row_names\n        else:\n            self.ax = obj\n            self.facets = None\n            ax_list = [obj]\n\n        # Identify which \"axis\" variables we have defined\n        axis_variables = set(\"xy\").intersection(self.variables)\n\n        # -- Verify the types of our x and y variables here.\n        # This doesn't really make complete sense being here, but it's a fine\n        # place for it, given the current system.\n        # (Note that for some plots, there might be more complicated restrictions)\n        # e.g. the categorical plots have their own check that is specific to the\n        # non-categorical axis.\n        if allowed_types is None:\n            allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n        elif isinstance(allowed_types, str):\n            allowed_types = [allowed_types]\n\n        for var in axis_variables:\n            var_type = self.var_types[var]\n            if var_type not in allowed_types:\n                err = (\n                    f\"The {var} variable is {var_type}, but one of \"\n                    f\"{allowed_types} is required\"\n                )\n                raise TypeError(err)\n\n        # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n        facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n        self.converters = {}\n        for var in axis_variables:\n            other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n            converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n            share_state = getattr(self.facets, f\"_share{var}\", True)\n\n            # Simplest cases are that we have a single axes, all axes are shared,\n            # or sharing is only on the orthogonal facet dimension. In these cases,\n            # all datapoints get converted the same way, so use the first axis\n            if share_state is True or share_state == facet_dim[other_var]:\n                converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n            else:\n\n                # Next simplest case is when no axes are shared, and we can\n                # use the axis objects within each facet\n                if share_state is False:\n                    for axes_vars, axes_data in self.iter_data():\n                        ax = self._get_axes(axes_vars)\n                        converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n                # In the more complicated case, the axes are shared within each\n                # \"file\" of the facetgrid. 
In that case, we need to subset the data\n                # for that file and assign it the first axis in the slice of the grid\n                else:\n\n                    names = getattr(self.facets, f\"{share_state}_names\")\n                    for i, level in enumerate(names):\n                        idx = (i, 0) if share_state == \"row\" else (0, i)\n                        axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n                        converter.loc[self.plot_data[share_state] == level] = axis\n\n            # Store the converter vector, which we use elsewhere (e.g. comp_data)\n            self.converters[var] = converter\n\n            # Now actually update the matplotlib objects to do the conversion we want\n            grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n            for converter, seed_data in grouped:\n                if self.var_types[var] == \"categorical\":\n                    if self._var_ordered[var]:\n                        order = self.var_levels[var]\n                    else:\n                        order = None\n                    seed_data = categorical_order(seed_data, order)\n                converter.update_units(seed_data)\n\n        # -- Set numerical axis scales\n\n        # First unpack the log_scale argument\n        if log_scale is None:\n            scalex = scaley = False\n        else:\n            # Allow single value or x, y tuple\n            try:\n                scalex, scaley = log_scale\n            except TypeError:\n                scalex = log_scale if \"x\" in self.variables else False\n                scaley = log_scale if \"y\" in self.variables else False\n\n        # Now use it\n        for axis, scale in zip(\"xy\", (scalex, scaley)):\n            if scale:\n                for ax in ax_list:\n                    set_scale = getattr(ax, f\"set_{axis}scale\")\n                    if scale is True:\n                        set_scale(\"log\")\n                    else:\n                        if Version(mpl.__version__) >= Version(\"3.3\"):\n                            set_scale(\"log\", base=scale)\n                        else:\n                            set_scale(\"log\", **{f\"base{axis}\": scale})\n\n        # For categorical y, we want the \"first\" level to be at the top of the axis\n        if self.var_types.get(\"y\", None) == \"categorical\":\n            for ax in ax_list:\n                try:\n                    ax.yaxis.set_inverted(True)\n                except AttributeError:  # mpl < 3.1\n                    if not ax.yaxis_inverted():\n                        ax.invert_yaxis()\n\n        # TODO -- Add axes labels\n\n    def _log_scaled(self, axis):\n        \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n        if not hasattr(self, \"ax\"):\n            return False\n\n        if self.ax is None:\n            axes_list = self.facets.axes.flatten()\n        else:\n            axes_list = [self.ax]\n\n        log_scaled = []\n        for ax in axes_list:\n            data_axis = getattr(ax, f\"{axis}axis\")\n            log_scaled.append(data_axis.get_scale() == \"log\")\n\n        if any(log_scaled) and not all(log_scaled):\n            raise RuntimeError(\"Axis scaling is not consistent\")\n\n        return any(log_scaled)\n\n    def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n        \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n        # TODO ax could default to None and use attached axes if present\n        # but what to do about the case of facets? Currently using FacetGrid's\n        # set_axis_labels method, which doesn't add labels to the interior even\n        # when the axes are not shared. Maybe that makes sense?\n        if not ax.get_xlabel():\n            x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n            ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n        if not ax.get_ylabel():\n            y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n            ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n    # XXX If the scale_* methods are going to modify the plot_data structure, they\n    # can't be called twice. That means that if they are called twice, they should\n    # raise. 
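The log_scale handling quoted above reduces to a small unpacking rule; a sketch follows (the standalone helper `unpack_log_scale` is illustrative, not part of the module): a pair is split between x and y, while a scalar bool or base applies to whichever axis variables are present.

def unpack_log_scale(log_scale, variables):
    # None disables scaling; a (x, y) pair is split; a scalar applies to both
    if log_scale is None:
        return False, False
    try:
        scalex, scaley = log_scale
    except TypeError:
        scalex = log_scale if "x" in variables else False
        scaley = log_scale if "y" in variables else False
    return scalex, scaley

assert unpack_log_scale(None, {"x", "y"}) == (False, False)
assert unpack_log_scale(2, {"x", "y"}) == (2, 2)        # base-2 on both axes
assert unpack_log_scale((True, 10), {"x", "y"}) == (True, 10)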
Alternatively, we could store an original version of plot_data and each\n    # time they are called they operate on the store, not the current state.\n\n    def scale_native(self, axis, *args, **kwargs):\n\n        # Default, defer to matplotlib\n\n        raise NotImplementedError\n\n    def scale_numeric(self, axis, *args, **kwargs):\n\n        # Feels needed for completeness, what should it do?\n        # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n        raise NotImplementedError\n\n    def scale_datetime(self, axis, *args, **kwargs):\n\n        # Use pd.to_datetime to convert strings or numbers to datetime objects\n        # Note, use day-resolution for numeric->datetime to match matplotlib\n\n        raise NotImplementedError\n\n    def scale_categorical(self, axis, order=None, formatter=None):\n        \"\"\"\n        Enforce categorical (fixed-scale) rules for the data on given axis.\n\n        Parameters\n        ----------\n        axis : \"x\" or \"y\"\n            Axis of the plot to operate on.\n        order : list\n            Order that unique values should appear in.\n        formatter : callable\n            Function mapping values to a string representation.\n\n        Returns\n        -------\n        self\n\n        \"\"\"\n        # This method both modifies the internal representation of the data\n        # (converting it to string) and sets some attributes on self. It might be\n        # a good idea to have a separate object attached to self that contains the\n        # information in those attributes (i.e. whether to enforce variable order\n        # across facets, the order to use) similar to the SemanticMapping objects\n        # we have for semantic variables. That object could also hold the converter\n        # objects that get used, if we can decouple those from an existing axis\n        # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n        # There are some interactions with faceting information that would need\n        # to be thought through, since the converters to use depend on facets.\n        # If we go that route, these methods could become \"borrowed\" methods similar\n        # to what happens with the alternate semantic mapper constructors, although\n        # that approach is kind of fussy and confusing.\n\n        # TODO this method could also set the grid state? Since we like to have no\n        # grid on the categorical axis by default. Again, a case where we'll need to\n        # store information until we use it, so best to have a way to collect the\n        # attributes that this method sets.\n\n        # TODO if we are going to set visual properties of the axes with these methods,\n        # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n        # TODO another, and distinct idea, is to expose a cut= param here\n\n        _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n        # Categorical plots can be \"univariate\" in which case they get an anonymous\n        # category label on the opposite axis.\n        if axis not in self.variables:\n            self.variables[axis] = None\n            self.var_types[axis] = \"categorical\"\n            self.plot_data[axis] = \"\"\n\n        # If the \"categorical\" variable has a numeric type, sort the rows so that\n        # the default result from categorical_order has those values sorted after\n        # they have been coerced to strings. 
The reason for this is so that later\n        # we can get facet-wise orders that are correct.\n        # XXX Should this also sort datetimes?\n        # It feels more consistent, but technically will be a default change\n        # If so, should also change categorical_order to behave that way\n        if self.var_types[axis] == \"numeric\":\n            self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n        # Now get a reference to the categorical data vector\n        cat_data = self.plot_data[axis]\n\n        # Get the initial categorical order, which we do before string\n        # conversion to respect the original types of the order list.\n        # Track whether the order is given explicitly so that we can know\n        # whether or not to use the order constructed here downstream\n        self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n        order = pd.Index(categorical_order(cat_data, order))\n\n        # Then convert data to strings. This is because in matplotlib,\n        # \"categorical\" data really mean \"string\" data, so doing this ensures\n        # artists will be drawn on the categorical axis with a fixed scale.\n        # TODO implement formatter here; check that it returns strings?\n        if formatter is not None:\n            cat_data = cat_data.map(formatter)\n            order = order.map(formatter)\n        else:\n            cat_data = cat_data.astype(str)\n            order = order.astype(str)\n\n        # Update the levels list with the type-converted order variable\n        self.var_levels[axis] = order\n\n        # Now ensure that seaborn will use categorical rules internally\n        self.var_types[axis] = \"categorical\"\n\n        # Put the string-typed categorical vector back into the plot_data structure\n        self.plot_data[axis] = cat_data\n\n        return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 688, "name": "assign_variables", "kind": "def", "category": "function", "info": "    def assign_variables(self, data=None, variables={}):\n        \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n        x = variables.get(\"x\", None)\n        y = variables.get(\"y\", None)\n\n        if x is None and y is None:\n            self.input_format = \"wide\"\n            plot_data, variables = self._assign_variables_wideform(\n                data, **variables,\n            )\n        else:\n            self.input_format = \"long\"\n            plot_data, variables = self._assign_variables_longform(\n                data, **variables,\n            )\n\n        self.plot_data = plot_data\n        self.variables = variables\n        self.var_types = {\n            v: variable_type(\n                plot_data[v],\n                boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n            )\n            for v in variables\n        }\n\n        return self\n\n    def _assign_variables_wideform(self, data=None, **kwargs):\n        \"\"\"Define plot variables given wide-form data.\n\n        Parameters\n        ----------\n        data : flat vector or collection of vectors\n            Data can be a vector or mapping that is coercible to a Series\n            or a sequence- or mapping-based collection of such vectors, or a\n            rectangular numpy array, or a Pandas DataFrame.\n        kwargs : variable -> data mappings\n            Behavior with keyword arguments is currently undefined.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        \"\"\"\n        # Raise if semantic or other variables are assigned in wide-form mode\n        assigned = [k for k, v in kwargs.items() if v is not None]\n        if any(assigned):\n            s = \"s\" if len(assigned) > 1 else \"\"\n            err = f\"The following 
variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n 
plot_data = plot_data[list(variables)]\n\n        return plot_data, variables\n\n    def _assign_variables_longform(self, data=None, **kwargs):\n        \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n        Parameters\n        ----------\n        data : dict-like collection of vectors\n            Input data where variable names map to vector values.\n        kwargs : variable -> data mappings\n            Keys are seaborn variables (x, y, hue, ...) and values are vectors\n            in any format that can construct a :class:`pandas.DataFrame` or\n            names of columns or index levels in ``data``.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        Raises\n        ------\n        ValueError\n            When variables are strings that don't appear in ``data``.\n\n        \"\"\"\n        plot_data = {}\n        variables = {}\n\n        # Data is optional; all variables can be defined as vectors\n        if data is None:\n            data = {}\n\n        # TODO should we try a data.to_dict() or similar here to more\n        # generally accept objects with that interface?\n        # Note that dict(df) also works for pandas, and gives us what we\n        # want, whereas DataFrame.to_dict() gives a nested dict instead of\n        # a dict of series.\n\n        # Variables can also be extracted from the index attribute\n        # TODO is this the most general way to enable it?\n        # There is no index.to_dict on multiindex, unfortunately\n        try:\n            index = data.index.to_frame()\n        except AttributeError:\n            index = {}\n\n        # The caller will determine the order of variables in plot_data\n        for key, val in kwargs.items():\n\n            # First try to treat the argument as a key for the data collection.\n            # But be flexible about what can be used as a key.\n            # Usually it will be a string, but allow numbers or tuples too when\n            # taking from the main data object. Only allow strings to reference\n            # fields in the index, because otherwise there is too much ambiguity.\n            try:\n                val_as_data_key = (\n                    val in data\n                    or (isinstance(val, (str, bytes)) and val in index)\n                )\n            except (KeyError, TypeError):\n                val_as_data_key = False\n\n            if val_as_data_key:\n\n                # We know that __getitem__ will work\n\n                if val in data:\n                    plot_data[key] = data[val]\n                elif val in index:\n                    plot_data[key] = index[val]\n                variables[key] = val\n\n            elif isinstance(val, (str, bytes)):\n\n                # This looks like a column name but we don't know what it means!\n\n                err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n                raise ValueError(err)\n\n            else:\n\n                # Otherwise, assume the value is itself data\n\n                # Raise when a data object is present and a vector can't be matched\n                if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n                    if np.ndim(val) and len(data) != len(val):\n                        val_cls = val.__class__.__name__\n                        err = (\n                            f\"Length of {val_cls} vectors must match length of `data`\"\n                            f\" when both are used, but `data` has length {len(data)}\"\n                            f\" and the vector passed to `{key}` has length {len(val)}.\"\n                        )\n                        raise ValueError(err)\n\n                plot_data[key] = val\n\n                # Try to infer the name of the variable\n                variables[key] = getattr(val, \"name\", None)\n\n        # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n                    # add (before 0.12), which we may decide to break, in which\n                    # case this option could be removed\n                    data_subset = data.loc[[]]\n\n                if data_subset.empty and not allow_empty:\n                    continue\n\n                sub_vars = dict(zip(grouping_vars, key))\n\n                yield sub_vars, data_subset.copy()\n\n        else:\n\n            yield {}, data.copy()\n\n    @property\n    def comp_data(self):\n        \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n        if not hasattr(self, \"ax\"):\n            # Probably a good idea, but will need a bunch of tests updated\n            # Most of these tests should just use the external interface\n            # Then this can be re-enabled.\n            # raise AttributeError(\"No Axes attached to plotter\")\n            return self.plot_data\n\n        if not hasattr(self, \"_comp_data\"):\n\n            comp_data = (\n                self.plot_data\n                .copy(deep=False)\n                .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n            )\n\n            for var in \"yx\":\n                if var not in self.variables:\n                    continue\n\n                parts = []\n                grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n                for converter, orig in grouped:\n                    with pd.option_context('mode.use_inf_as_null', True):\n                        orig = orig.dropna()\n                        if var in self.var_levels:\n                            # TODO this should happen in some centralized location\n                            # it is similar to GH2419, but more complicated because\n                            # supporting `order` in categorical plots is tricky\n                            orig = orig[orig.isin(self.var_levels[var])]\n                    comp = pd.to_numeric(converter.convert_units(orig))\n                    if converter.get_scale() == \"log\":\n                        comp = np.log10(comp)\n                    parts.append(pd.Series(comp, orig.index, name=orig.name))\n                if parts:\n                    comp_col = pd.concat(parts)\n                else:\n                    comp_col = pd.Series(dtype=float, name=var)\n                comp_data.insert(0, var, comp_col)\n\n            self._comp_data = comp_data\n\n        return self._comp_data\n\n    def _get_axes(self, sub_vars):\n        \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n        row = sub_vars.get(\"row\", None)\n        col = sub_vars.get(\"col\", None)\n        if row is not None and col is not None:\n            return self.facets.axes_dict[(row, col)]\n        elif row is not None:\n            return self.facets.axes_dict[row]\n        elif col is not None:\n            return self.facets.axes_dict[col]\n        elif self.ax is None:\n            return self.facets.ax\n        else:\n            return self.ax\n\n    def _attach(\n        self,\n        obj,\n        allowed_types=None,\n        log_scale=None,\n    ):\n        \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n        Parameters\n        ----------\n        obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`\n            Structural object that we will eventually plot onto.\n        allowed_types : str or list of str\n            If provided, raise when either the x or y variable does not have\n            one of the declared seaborn types.\n        log_scale : bool, number, or pair of bools or numbers\n            If not False, set the axes to use log scaling, with the given\n            base or defaulting to 10. 
If a tuple, interpreted as separate\n            arguments for the x and y axes.\n\n        \"\"\"\n        from .axisgrid import FacetGrid\n        if isinstance(obj, FacetGrid):\n            self.ax = None\n            self.facets = obj\n            ax_list = obj.axes.flatten()\n            if obj.col_names is not None:\n                self.var_levels[\"col\"] = obj.col_names\n            if obj.row_names is not None:\n                self.var_levels[\"row\"] = obj.row_names\n        else:\n            self.ax = obj\n            self.facets = None\n            ax_list = [obj]\n\n        # Identify which \"axis\" variables we have defined\n        axis_variables = set(\"xy\").intersection(self.variables)\n\n        # -- Verify the types of our x and y variables here.\n        # This doesn't really make complete sense being here, but it's a fine\n        # place for it, given the current system.\n        # (Note that for some plots, there might be more complicated restrictions)\n        # e.g. the categorical plots have their own check that is specific to the\n        # non-categorical axis.\n        if allowed_types is None:\n            allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n        elif isinstance(allowed_types, str):\n            allowed_types = [allowed_types]\n\n        for var in axis_variables:\n            var_type = self.var_types[var]\n            if var_type not in allowed_types:\n                err = (\n                    f\"The {var} variable is {var_type}, but one of \"\n                    f\"{allowed_types} is required\"\n                )\n                raise TypeError(err)\n\n        # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n        facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n        self.converters = {}\n        for var in axis_variables:\n            other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n            converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n            share_state = getattr(self.facets, f\"_share{var}\", True)\n\n            # Simplest cases are that we have a single axes, all axes are shared,\n            # or sharing is only on the orthogonal facet dimension. In these cases,\n            # all datapoints get converted the same way, so use the first axis\n            if share_state is True or share_state == facet_dim[other_var]:\n                converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n            else:\n\n                # Next simplest case is when no axes are shared, and we can\n                # use the axis objects within each facet\n                if share_state is False:\n                    for axes_vars, axes_data in self.iter_data():\n                        ax = self._get_axes(axes_vars)\n                        converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n                # In the more complicated case, the axes are shared within each\n                # \"file\" of the facetgrid. 
In that case, we need to subset the data\n                # for that file and assign it the first axis in the slice of the grid\n                else:\n\n                    names = getattr(self.facets, f\"{share_state}_names\")\n                    for i, level in enumerate(names):\n                        idx = (i, 0) if share_state == \"row\" else (0, i)\n                        axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n                        converter.loc[self.plot_data[share_state] == level] = axis\n\n            # Store the converter vector, which we use elsewhere (e.g. comp_data)\n            self.converters[var] = converter\n\n            # Now actually update the matplotlib objects to do the conversion we want\n            grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n            for converter, seed_data in grouped:\n                if self.var_types[var] == \"categorical\":\n                    if self._var_ordered[var]:\n                        order = self.var_levels[var]\n                    else:\n                        order = None\n                    seed_data = categorical_order(seed_data, order)\n                converter.update_units(seed_data)\n\n        # -- Set numerical axis scales\n\n        # First unpack the log_scale argument\n        if log_scale is None:\n            scalex = scaley = False\n        else:\n            # Allow single value or x, y tuple\n            try:\n                scalex, scaley = log_scale\n            except TypeError:\n                scalex = log_scale if \"x\" in self.variables else False\n                scaley = log_scale if \"y\" in self.variables else False\n\n        # Now use it\n        for axis, scale in zip(\"xy\", (scalex, scaley)):\n            if scale:\n                for ax in ax_list:\n                    set_scale = getattr(ax, f\"set_{axis}scale\")\n                    if scale is True:\n                        set_scale(\"log\")\n                    else:\n                        if Version(mpl.__version__) >= Version(\"3.3\"):\n                            set_scale(\"log\", base=scale)\n                        else:\n                            set_scale(\"log\", **{f\"base{axis}\": scale})\n\n        # For categorical y, we want the \"first\" level to be at the top of the axis\n        if self.var_types.get(\"y\", None) == \"categorical\":\n            for ax in ax_list:\n                try:\n                    ax.yaxis.set_inverted(True)\n                except AttributeError:  # mpl < 3.1\n                    if not ax.yaxis_inverted():\n                        ax.invert_yaxis()\n\n        # TODO -- Add axes labels\n\n    def _log_scaled(self, axis):\n        \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n        if not hasattr(self, \"ax\"):\n            return False\n\n        if self.ax is None:\n            axes_list = self.facets.axes.flatten()\n        else:\n            axes_list = [self.ax]\n\n        log_scaled = []\n        for ax in axes_list:\n            data_axis = getattr(ax, f\"{axis}axis\")\n            log_scaled.append(data_axis.get_scale() == \"log\")\n\n        if any(log_scaled) and not all(log_scaled):\n            raise RuntimeError(\"Axis scaling is not consistent\")\n\n        return any(log_scaled)\n\n    def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n        \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n        # TODO ax could default to None and use attached axes if present\n        # but what to do about the case of facets? Currently using FacetGrid's\n        # set_axis_labels method, which doesn't add labels to the interior even\n        # when the axes are not shared. Maybe that makes sense?\n        if not ax.get_xlabel():\n            x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n            ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n        if not ax.get_ylabel():\n            y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n            ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n    # XXX If the scale_* methods are going to modify the plot_data structure, they\n    # can't be called twice. That means that if they are called twice, they should\n    # raise. 
Alternatively, we could store an original version of plot_data and each\n    # time they are called they operate on the store, not the current state.\n\n    def scale_native(self, axis, *args, **kwargs):\n\n        # Default, defer to matplotlib\n\n        raise NotImplementedError\n\n    def scale_numeric(self, axis, *args, **kwargs):\n\n        # Feels needed for completeness, what should it do?\n        # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n        raise NotImplementedError\n\n    def scale_datetime(self, axis, *args, **kwargs):\n\n        # Use pd.to_datetime to convert strings or numbers to datetime objects\n        # Note, use day-resolution for numeric->datetime to match matplotlib\n\n        raise NotImplementedError\n\n    def scale_categorical(self, axis, order=None, formatter=None):\n        \"\"\"\n        Enforce categorical (fixed-scale) rules for the data on given axis.\n\n        Parameters\n        ----------\n        axis : \"x\" or \"y\"\n            Axis of the plot to operate on.\n        order : list\n            Order that unique values should appear in.\n        formatter : callable\n            Function mapping values to a string representation.\n\n        Returns\n        -------\n        self\n\n        \"\"\"\n        # This method both modifies the internal representation of the data\n        # (converting it to string) and sets some attributes on self. It might be\n        # a good idea to have a separate object attached to self that contains the\n        # information in those attributes (i.e. whether to enforce variable order\n        # across facets, the order to use) similar to the SemanticMapping objects\n        # we have for semantic variables. That object could also hold the converter\n        # objects that get used, if we can decouple those from an existing axis\n        # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n        # There are some interactions with faceting information that would need\n        # to be thought through, since the converters to use depend on facets.\n        # If we go that route, these methods could become \"borrowed\" methods similar\n        # to what happens with the alternate semantic mapper constructors, although\n        # that approach is kind of fussy and confusing.\n\n        # TODO this method could also set the grid state? Since we like to have no\n        # grid on the categorical axis by default. Again, a case where we'll need to\n        # store information until we use it, so best to have a way to collect the\n        # attributes that this method sets.\n\n        # TODO if we are going to set visual properties of the axes with these methods,\n        # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n        # TODO another, and distinct idea, is to expose a cut= param here\n\n        _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n        # Categorical plots can be \"univariate\" in which case they get an anonymous\n        # category label on the opposite axis.\n        if axis not in self.variables:\n            self.variables[axis] = None\n            self.var_types[axis] = \"categorical\"\n            self.plot_data[axis] = \"\"\n\n        # If the \"categorical\" variable has a numeric type, sort the rows so that\n        # the default result from categorical_order has those values sorted after\n        # they have been coerced to strings. 
The reason for this is so that later\n        # we can get facet-wise orders that are correct.\n        # XXX Should this also sort datetimes?\n        # It feels more consistent, but technically will be a default change\n        # If so, should also change categorical_order to behave that way\n        if self.var_types[axis] == \"numeric\":\n            self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n        # Now get a reference to the categorical data vector\n        cat_data = self.plot_data[axis]\n\n        # Get the initial categorical order, which we do before string\n        # conversion to respect the original types of the order list.\n        # Track whether the order is given explicitly so that we can know\n        # whether or not to use the order constructed here downstream\n        self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n        order = pd.Index(categorical_order(cat_data, order))\n\n        # Then convert data to strings. This is because in matplotlib,\n        # \"categorical\" data really mean \"string\" data, so doing this ensures\n        # artists will be drawn on the categorical axis with a fixed scale.\n        # TODO implement formatter here; check that it returns strings?\n        if formatter is not None:\n            cat_data = cat_data.map(formatter)\n            order = order.map(formatter)\n        else:\n            cat_data = cat_data.astype(str)\n            order = order.astype(str)\n\n        # Update the levels list with the type-converted order variable\n        self.var_levels[axis] = order\n\n        # Now ensure that seaborn will use categorical rules internally\n        self.var_types[axis] = \"categorical\"\n\n        # Put the string-typed categorical vector back into the plot_data structure\n        self.plot_data[axis] = cat_data\n\n        return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 695, "name": "_assign_variables_wideform", "kind": "ref", "category": "function", "info": "            plot_data, variables = self._assign_variables_wideform(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 700, "name": "_assign_variables_longform", "kind": "ref", "category": "function", "info": "            plot_data, variables = self._assign_variables_longform(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 707, "name": "variable_type", "kind": "ref", "category": "function", "info": "            v: variable_type(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 716, "name": "_assign_variables_wideform", "kind": "def", "category": "function", "info": "    def _assign_variables_wideform(self, data=None, **kwargs):\n        \"\"\"Define plot variables given wide-form data.\n\n        Parameters\n        ----------\n        data : flat vector or collection of vectors\n            Data can be a vector or mapping that is coercible to a Series\n            or a sequence- or mapping-based collection of such vectors, or a\n            rectangular numpy array, or a Pandas DataFrame.\n        kwargs : variable -> data mappings\n            Behavior with keyword arguments is currently undefined.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        \"\"\"\n        # Raise if semantic or other variables are assigned in wide-form mode\n        assigned = [k for k, v 
in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = 
getattr(wide_data, attr[1:])\n                variables[var] = getattr(obj, \"name\", None)\n\n            # Remove redundant columns from plot_data\n            plot_data = plot_data[list(variables)]\n\n        return plot_data, variables\n\n    def _assign_variables_longform(self, data=None, **kwargs):\n        \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n        Parameters\n        ----------\n        data : dict-like collection of vectors\n            Input data where variable names map to vector values.\n        kwargs : variable -> data mappings\n            Keys are seaborn variables (x, y, hue, ...) and values are vectors\n            in any format that can construct a :class:`pandas.DataFrame` or\n            names of columns or index levels in ``data``.\n\n        Returns\n        -------\n        plot_data : :class:`pandas.DataFrame`\n            Long-form data object mapping seaborn variables (x, y, hue, ...)\n            to data vectors.\n        variables : dict\n            Keys are defined seaborn variables; values are names inferred from\n            the inputs (or None when no name can be determined).\n\n        Raises\n        ------\n        ValueError\n            When variables are strings that don't appear in ``data``.\n\n        \"\"\"\n        plot_data = {}\n        variables = {}\n\n        # Data is optional; all variables can be defined as vectors\n        if data is None:\n            data = {}\n\n        # TODO should we try a data.to_dict() or similar here to more\n        # generally accept objects with that interface?\n        # Note that dict(df) also works for pandas, and gives us what we\n        # want, whereas DataFrame.to_dict() gives a nested dict instead of\n        # a dict of series.\n\n        # Variables can also be extracted from the index attribute\n        # TODO is this the most general way to enable it?\n        # There is no index.to_dict on multiindex, unfortunately\n        try:\n            index = data.index.to_frame()\n        except AttributeError:\n            index = {}\n\n        # The caller will determine the order of variables in plot_data\n        for key, val in kwargs.items():\n\n            # First try to treat the argument as a key for the data collection.\n            # But be flexible about what can be used as a key.\n            # Usually it will be a string, but allow numbers or tuples too when\n            # taking from the main data object. Only allow strings to reference\n            # fields in the index, because otherwise there is too much ambiguity.\n            try:\n                val_as_data_key = (\n                    val in data\n                    or (isinstance(val, (str, bytes)) and val in index)\n                )\n            except (KeyError, TypeError):\n                val_as_data_key = False\n\n            if val_as_data_key:\n\n                # We know that __getitem__ will work\n\n                if val in data:\n                    plot_data[key] = data[val]\n                elif val in index:\n                    plot_data[key] = index[val]\n                variables[key] = val\n\n            elif isinstance(val, (str, bytes)):\n\n                # This looks like a column name but we don't know what it means!\n\n                err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n                raise ValueError(err)\n\n            else:\n\n                # Otherwise, assume the value is itself data\n\n                # Raise when a data object is present and a vector can't be matched\n                if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n                    if np.ndim(val) and len(data) != len(val):\n                        val_cls = val.__class__.__name__\n                        err = (\n                            f\"Length of {val_cls} vectors must match length of `data`\"\n                            f\" when both are used, but `data` has length {len(data)}\"\n                            f\" and the vector passed to `{key}` has length {len(val)}.\"\n                        )\n                        raise ValueError(err)\n\n                plot_data[key] = val\n\n                # Try to infer the name of the variable\n                variables[key] = getattr(val, \"name\", None)\n\n        # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
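To make the comp_data conversion just described concrete, here is a standalone sketch (hypothetical `computed_values` helper, not seaborn's API) of running one vector through a matplotlib axis converter and applying log10 when that axis is log scaled:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

def computed_values(ax, orig):
    # Convert to numeric axis units, then to log10 space if the axis is log scaled.
    axis = ax.xaxis
    comp = pd.to_numeric(axis.convert_units(orig))
    if axis.get_scale() == "log":
        comp = np.log10(comp)
    return pd.Series(comp, index=orig.index, name=orig.name)

fig, ax = plt.subplots()
ax.set_xscale("log")
print(computed_values(ax, pd.Series([1.0, 10.0, 100.0], name="x")))  # 0.0, 1.0, 2.0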
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
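The log_scale handling above accepts either a scalar or an (x, y) pair; a runnable sketch under that assumption (hypothetical `apply_log_scale` helper, simplified to a single Axes and matplotlib >= 3.3):

import matplotlib.pyplot as plt

def apply_log_scale(ax, log_scale=None):
    if log_scale is None:
        scalex = scaley = False
    else:
        try:
            scalex, scaley = log_scale   # (x, y) pair of bools or bases
        except TypeError:
            scalex = scaley = log_scale  # one value applies to both axes
    for axis, scale in zip("xy", (scalex, scaley)):
        if scale:
            set_scale = getattr(ax, f"set_{axis}scale")
            if scale is True:
                set_scale("log")
            else:
                set_scale("log", base=scale)

fig, ax = plt.subplots()
apply_log_scale(ax, log_scale=(10, 2))  # log10 on x, log2 on y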
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 817, "name": "variable_type", "kind": "ref", "category": "function", "info": " k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 829, "name": "add_categories", "kind": "ref", "category": "function", "info": " wide_data.columns = wide_data.columns.add_categories(\"@index\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 834, "name": "to_series", "kind": "ref", "category": "function", "info": " wide_data[\"@index\"] = wide_data.index.to_series()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 858, "name": "_assign_variables_longform", "kind": "def", "category": "function", "info": " def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
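A compact sketch of the label-visibility rule in _add_axis_labels (standalone approximation with a hypothetical `label_if_missing` helper): only label an axis that lacks one, and match the label's visibility to the tick labels:

import matplotlib.pyplot as plt

def label_if_missing(ax, xlabel="", ylabel=""):
    if not ax.get_xlabel():
        visible = any(t.get_visible() for t in ax.get_xticklabels())
        ax.set_xlabel(xlabel, visible=visible)
    if not ax.get_ylabel():
        visible = any(t.get_visible() for t in ax.get_yticklabels())
        ax.set_ylabel(ylabel, visible=visible)

fig, ax = plt.subplots()
label_if_missing(ax, "time (s)", "value")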
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 902, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = data.index.to_frame()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 972, "name": "iter_data", "kind": "def", "category": "function", "info": " def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) 
semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = 
pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
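The consistency check in _log_scaled can be illustrated with a short standalone sketch (hypothetical `axis_is_log` helper): all attached axes must agree on whether the given axis is log scaled, otherwise it is an error:

import matplotlib.pyplot as plt

def axis_is_log(axes_list, axis="x"):
    flags = [getattr(ax, f"{axis}axis").get_scale() == "log" for ax in axes_list]
    if any(flags) and not all(flags):
        raise RuntimeError("Axis scaling is not consistent")
    return any(flags)

fig, axs = plt.subplots(1, 2)
for ax in axs:
    ax.set_xscale("log")
assert axis_is_log(axs, "x")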
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really means \"string\" data, so doing this ensures that\n # artists will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1032, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1044, "name": "convert_units", "kind": "ref", "category": "function", "info": " levels[axis] = converter.convert_units(levels[axis])\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1050, "name": "date2num", "kind": "ref", "category": "function", "info": " levels[axis] = mpl.dates.date2num(levels[axis])\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1051, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1074, "name": "get_group", "kind": "ref", "category": "function", "info": " data_subset = grouped_data.get_group(pd_key)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1094, "name": "comp_data", "kind": "def", "category": "function", "info": " def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_null', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1108, "name": "drop", "kind": "ref", "category": "function", "info": " .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1119, "name": "dropna", "kind": "ref", "category": "function", "info": " orig = orig.dropna()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1125, "name": "convert_units", "kind": "ref", "category": "function", "info": " comp = pd.to_numeric(converter.convert_units(orig))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1126, "name": "get_scale", "kind": "ref", "category": "function", "info": " if converter.get_scale() == \"log\":\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1139, "name": "_get_axes", "kind": "def", "category": "function", "info": " def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1154, "name": "_attach", "kind": "def", "category": "function", "info": " def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that is specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g. comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1179, "name": "flatten", "kind": "ref", "category": "function", "info": " ax_list = obj.axes.flatten()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1234, "name": "iter_data", "kind": "ref", "category": "function", "info": " for axes_vars, axes_data in self.iter_data():\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1235, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(axes_vars)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1260, "name": "categorical_order", "kind": "ref", "category": "function", "info": " seed_data = categorical_order(seed_data, order)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1261, "name": "update_units", "kind": "ref", "category": "function", "info": " converter.update_units(seed_data)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1282, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\")\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1284, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.3\"):\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1284, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.3\"):\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1285, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", base=scale)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1287, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", **{f\"base{axis}\": scale})\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1293, "name": "set_inverted", "kind": "ref", "category": "function", "info": " ax.yaxis.set_inverted(True)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1295, "name": "yaxis_inverted", "kind": "ref", "category": "function", "info": " if not ax.yaxis_inverted():\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1296, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1300, "name": "_log_scaled", "kind": "def", "category": "function", "info": " def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1306, "name": "flatten", "kind": "ref", "category": "function", "info": " axes_list = self.facets.axes.flatten()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1313, "name": "get_scale", "kind": "ref", "category": "function", "info": " log_scaled.append(data_axis.get_scale() == \"log\")\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1320, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1326, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " if not ax.get_xlabel():\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1327, "name": "get_visible", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1327, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1328, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1329, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " if not ax.get_ylabel():\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1330, "name": "get_visible", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1330, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1331, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1338, "name": "scale_native", "kind": "def", "category": "function", "info": " def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1344, "name": "scale_numeric", "kind": "def", "category": "function", "info": " def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed for completeness, but what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1351, "name": "scale_datetime", "kind": "def", "category": "function", "info": " def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on the given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converters to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct, idea is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\", in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1358, "name": "scale_categorical", "kind": "def", "category": "function", "info": " def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1400, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"axis\", [\"x\", \"y\"], axis)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1417, "name": "sort_values", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1427, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = pd.Index(categorical_order(cat_data, order))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1437, "name": "astype", "kind": "ref", "category": "function", "info": " cat_data = cat_data.astype(str)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1438, "name": "astype", "kind": "ref", "category": "function", "info": " order = order.astype(str)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1452, "name": "VariableType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1472, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(vector, boolean_type=\"numeric\"):\n \"\"\"\n Determine whether a vector contains numeric, categorical, or datetime data.\n\n This function differs from the pandas typing API in two ways:\n\n - Python sequences or object-typed PyData objects are considered numeric if\n all of their entries are numeric.\n - String or mixed-type data are considered categorical even if not\n explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.\n\n Parameters\n ----------\n vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence\n Input data to test.\n boolean_type : 'numeric' or 'categorical'\n Type to use for vectors containing only 0s and 1s (and NAs).\n\n Returns\n -------\n var_type : 'numeric', 'categorical', or 'datetime'\n Name identifying the type of data in the vector.\n \"\"\"\n\n # If a categorical dtype is set, infer categorical\n if pd.api.types.is_categorical_dtype(vector):\n return VariableType(\"categorical\")\n\n # Special-case all-na data, which is always \"numeric\"\n if 
pd.isna(vector).all():\n return VariableType(\"numeric\")\n\n # Special-case binary/boolean data, allow caller to determine\n # This triggers a numpy warning when vector has strings/objects\n # https://github.com/numpy/numpy/issues/6784\n # Because we reduce with .all(), we are agnostic about whether the\n # comparison returns a scalar or vector, so we will ignore the warning.\n # It triggers a separate DeprecationWarning when the vector has datetimes:\n # https://github.com/numpy/numpy/issues/13548\n # This is considered a bug by numpy and will likely go away.\n with warnings.catch_warnings():\n warnings.simplefilter(\n action='ignore', category=(FutureWarning, DeprecationWarning)\n )\n if np.isin(vector, [0, 1, np.nan]).all():\n return VariableType(boolean_type)\n\n # Defer to positive pandas tests\n if pd.api.types.is_numeric_dtype(vector):\n return VariableType(\"numeric\")\n\n if pd.api.types.is_datetime64_dtype(vector):\n return VariableType(\"datetime\")\n\n # --- If we get to here, we need to check the entries\n\n # Check for a collection where everything is a number\n\n def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1497, "name": "is_categorical_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_categorical_dtype(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1498, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1502, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1517, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(boolean_type)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1520, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1521, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1523, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": 
"seaborn/_oldcore.py", "line": 1524, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1530, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1536, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1537, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1541, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1547, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1548, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1552, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1555, "name": "infer_orient", "kind": "def", "category": "function", "info": "def infer_orient(x=None, y=None, orient=None, require_numeric=True):\n \"\"\"Determine how the plot should be oriented based on the data.\n\n For historical reasons, the convention is to call a plot \"horizontally\"\n or \"vertically\" oriented based on the axis representing its dependent\n variable. 
Practically, this is used when determining the axis for\n numerical aggregation.\n\n Parameters\n ----------\n x, y : Vector data or None\n Positional data vectors for the plot.\n orient : string or None\n Specified orientation, which must start with \"v\" or \"h\" if not None.\n require_numeric : bool\n If set, raise when the implied dependent variable is not numeric.\n\n Returns\n -------\n orient : \"v\" or \"h\"\n\n Raises\n ------\n ValueError: When `orient` is not None and does not start with \"h\" or \"v\"\n TypeError: When dependent variable is not numeric, with `require_numeric`\n\n \"\"\"\n\n x_type = None if x is None else variable_type(x)\n y_type = None if y is None else variable_type(y)\n\n nonnumeric_dv_error = \"{} orientation requires numeric `{}` variable.\"\n single_var_warning = \"{} orientation ignored with only `{}` specified.\"\n\n if x is None:\n if str(orient).startswith(\"h\"):\n warnings.warn(single_var_warning.format(\"Horizontal\", \"y\"))\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"v\"\n\n elif y is None:\n if str(orient).startswith(\"v\"):\n warnings.warn(single_var_warning.format(\"Vertical\", \"x\"))\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"h\"\n\n elif str(orient).startswith(\"v\"):\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"v\"\n\n elif str(orient).startswith(\"h\"):\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"h\"\n\n elif orient is not None:\n err = (\n \"`orient` must start with 'v' or 'h' or be None, \"\n f\"but `{repr(orient)}` was passed.\"\n )\n raise ValueError(err)\n\n elif x_type != \"categorical\" and y_type == \"categorical\":\n return \"h\"\n\n elif x_type != \"numeric\" and y_type == \"numeric\":\n return \"v\"\n\n elif x_type == \"numeric\" and y_type != \"numeric\":\n return \"h\"\n\n elif require_numeric and \"numeric\" not in (x_type, y_type):\n err = \"Neither the `x` nor `y` variable appears to be numeric.\"\n raise TypeError(err)\n\n else:\n return \"v\"\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1583, "name": "variable_type", "kind": "ref", "category": "function", "info": " x_type = None if x is None else variable_type(x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1584, "name": "variable_type", "kind": "ref", "category": "function", "info": " y_type = None if y is None else variable_type(y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1637, "name": "unique_dashes", "kind": "def", "category": "function", "info": "def unique_dashes(n):\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes = [\n \"\",\n (4, 1.5),\n (1, 1),\n (3, 1.25, 1.5, 1.25),\n (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(\n list(a)[1:-1][::-1],\n list(b)[1:-1]\n ))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return dashes[:n]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1688, "name": "unique_markers", "kind": "def", "category": "function", "info": "def unique_markers(n):\n \"\"\"Build an arbitrarily long list of unique marker styles for points.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\",\n \"X\",\n (4, 0, 45),\n \"P\",\n (4, 0, 0),\n (4, 1, 0),\n \"^\",\n (4, 1, 45),\n \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([\n (s + 1, 1, a),\n (s + 1, 0, a),\n (s, 1, 0),\n (s, 0, 0),\n ])\n s += 1\n\n # Convert to MarkerStyle object, using only exactly what we need\n # markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]\n\n return markers[:n]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1734, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector, order=None):\n \"\"\"Return a list of unique data values.\n\n Determine an ordered list of levels in ``values``.\n\n Parameters\n ----------\n vector : list, array, Categorical, or Series\n Vector of \"categorical\" values\n order : list-like, optional\n Desired order of category levels to override the order determined\n from the ``values`` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is None:\n if hasattr(vector, \"categories\"):\n order = vector.categories\n else:\n try:\n order = vector.cat.categories\n except (TypeError, AttributeError):\n\n try:\n order = vector.unique()\n except AttributeError:\n order = pd.unique(vector)\n\n if variable_type(vector) == \"numeric\":\n order = np.sort(order)\n\n order = filter(pd.notnull, order)\n return list(order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1766, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(vector) == \"numeric\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 40, "name": "KDE", "kind": "def", "category": "class", "info": 
"__init__\t_define_support_grid\t_define_support_univariate\t_define_support_bivariate\tdefine_support\t_fit\t_eval_univariate\t_eval_bivariate\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 88, "name": "_define_support_grid", "kind": "def", "category": "function", "info": " def _define_support_grid(self, x, bw, cut, clip, gridsize):\n \"\"\"Create the grid of evaluation points depending for vector x.\"\"\"\n clip_lo = -np.inf if clip[0] is None else clip[0]\n clip_hi = +np.inf if clip[1] is None else clip[1]\n gridmin = max(x.min() - bw * cut, clip_lo)\n gridmax = min(x.max() + bw * cut, clip_hi)\n return np.linspace(gridmin, gridmax, gridsize)\n\n def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 96, "name": "_define_support_univariate", "kind": "def", "category": "function", "info": " def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 98, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 100, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid = self._define_support_grid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", 
"rel_fname": "seaborn/_statistics.py", "line": 105, "name": "_define_support_bivariate", "kind": "def", "category": "function", "info": " def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 111, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 114, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid1 = self._define_support_grid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 117, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid2 = self._define_support_grid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 123, "name": "define_support", "kind": "def", 
"category": "function", "info": " def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 126, "name": "_define_support_univariate", "kind": "ref", "category": "function", "info": " support = self._define_support_univariate(x1, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 128, "name": "_define_support_bivariate", "kind": "ref", "category": "function", "info": " support = self._define_support_bivariate(x1, x2, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 135, "name": "_fit", "kind": "def", "category": "function", "info": " def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n 
def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 146, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 150, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x, cache=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 152, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 160, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde(support)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 164, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = 
self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 168, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x1, x2, cache=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 170, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 184, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 191, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 193, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 196, "name": "Histogram", "kind": "def", "category": "class", "info": "__init__\t_define_bin_edges\tdefine_bin_params\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 240, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", stat_choices, stat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 251, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n if binrange is None:\n start, stop = x.min(), x.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n # Handle roundoff error (maybe there is a less clumsy way?)\n if bin_edges.max() < stop or len(bin_edges) < 2:\n bin_edges = np.append(bin_edges, bin_edges.max() + step)\n else:\n bin_edges = np.histogram_bin_edges(\n x, bins, binrange, weights,\n )\n return bin_edges\n\n def define_bin_params(self, x1, x2=None, 
weights=None, cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 272, "name": "define_bin_params", "kind": "def", "category": "function", "info": " def define_bin_params(self, x1, x2=None, weights=None, 
cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 276, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 321, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges.append(self._define_bin_edges(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 332, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 336, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x1, x2, cache=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 350, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 352, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 354, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / area\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": 
"seaborn/_statistics.py", "line": 364, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 368, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 376, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 378, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 380, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / np.diff(bin_edges)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 393, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 395, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 398, "name": "ECDF", "kind": "def", "category": "class", "info": "__init__\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 411, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", [\"count\", \"proportion\"], stat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 415, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, 
x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 419, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 446, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 
448, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 451, "name": "EstimateAggregator", "kind": "def", "category": "class", "info": "__init__\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 471, "name": "_validate_errorbar_arg", "kind": "ref", "category": "function", "info": " method, level = _validate_errorbar_arg(errorbar)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 483, "name": "estimator", "kind": "ref", "category": "function", "info": " estimate = self.estimator(vals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 485, "name": "agg", "kind": "ref", "category": "function", "info": " estimate = vals.agg(self.estimator)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 495, "name": "error_method", "kind": "ref", "category": "function", "info": " err_min, err_max = self.error_method(vals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 502, "name": "sem", "kind": "ref", "category": "function", "info": " half_interval = vals.sem() * self.error_level\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 507, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(vals, self.error_level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 510, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 511, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(boots, self.error_level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 516, "name": "_percentile_interval", "kind": "def", "category": "function", "info": "def _percentile_interval(data, width):\n \"\"\"Return a percentile interval from data of a given width.\"\"\"\n edge = (100 - width) / 2\n percentiles = edge, 100 - edge\n return np.nanpercentile(data, percentiles)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 523, "name": "_validate_errorbar_arg", "kind": "def", "category": "function", "info": "def _validate_errorbar_arg(arg):\n \"\"\"Check type and value of errorbar argument and assign default level.\"\"\"\n DEFAULT_LEVELS = {\n \"ci\": 95,\n \"pi\": 95,\n \"se\": 1,\n \"sd\": 1,\n }\n\n usage = \"`errorbar` must be a callable, string, or (string, number) tuple\"\n\n if arg is None:\n return None, None\n 
elif callable(arg):\n return arg, None\n elif isinstance(arg, str):\n method = arg\n level = DEFAULT_LEVELS.get(method, None)\n else:\n try:\n method, level = arg\n except (ValueError, TypeError) as err:\n raise err.__class__(usage) from err\n\n _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n if level is not None and not isinstance(level, Number):\n raise TypeError(usage)\n\n return method, level\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 547, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 16, "name": "Agg", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 45, "name": "Est", "kind": "def", "category": "class", "info": "_process\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 70, "name": "_process", "kind": "def", "category": "function", "info": " def _process(\n self, data: DataFrame, var: str, estimator: EstimateAggregator\n ) -> DataFrame:\n # Needed because GroupBy.apply assumes func is DataFrame -> DataFrame\n # which we could probably make more general to allow Series return\n res = estimator(data, var)\n return pd.DataFrame([res])\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n boot_kws = {\"n_boot\": self.n_boot, \"seed\": self.seed}\n engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n\n var = {\"x\": \"y\", \"y\": \"x\"}.get(orient)\n res = (\n groupby\n .apply(data, self._process, var, engine)\n .dropna(subset=[\"x\", \"y\"])\n .reset_index(drop=True)\n )\n\n res = res.fillna({f\"{var}min\": res[var], f\"{var}max\": res[var]})\n\n return res\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 75, "name": "estimator", "kind": "ref", "category": "function", "info": " res = estimator(data, var)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 83, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 99, "name": "Rolling", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 13, "name": "Stat", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "__future__", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", 
"rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "annotations", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "dataclasses", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "dataclass", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "typing", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "ClassVar", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "typing", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "TYPE_CHECKING", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "TYPE_CHECKING", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "pandas", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "DataFrame", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "seaborn", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "_core", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "groupby", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "GroupBy", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": 
"seaborn", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "_core", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": ".", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "scales", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "Scale", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "@dataclass", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "Stat", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "group_by_orient", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "ClassVar", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "bool", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "__call__", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "self", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "data", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "DataFrame", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "groupby", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "GroupBy", "kind": "ref", "category": "function", "info": "none"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "orient", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "str", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "scales", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "dict", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "str", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "Scale", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "DataFrame", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": -1, "name": "data", "kind": "ref", "category": "function", "info": "none"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 16, "name": "Hist", "kind": "def", "category": "class", "info": "_define_bin_edges\t_define_bin_params\t_get_bins_and_eval\t_eval\t_normalize\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 36, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n vals = vals.dropna()\n\n if binrange is None:\n start, stop = vals.min(), vals.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n else:\n bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)\n\n # TODO warning or cap on too many bins?\n\n return bin_edges\n\n def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weight, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def 
_get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n # TODO better to do this as an isinstance check?\n # We are only asking about Nominal scales now,\n # but presumably would apply to Ordinal too?\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 38, "name": "dropna", "kind": "ref", "category": "function", "info": " vals = vals.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 57, "name": "_define_bin_params", "kind": "def", "category": "function", "info": " def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weight, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = 
dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n # TODO better to do this as an isinstance check?\n # We are only asking about Nominal scales now,\n # but presumably would apply to Ordinal too?\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 66, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 79, "name": "_get_bins_and_eval", "kind": "def", "category": "function", "info": " def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": 
width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n # TODO better to do this as an isinstance check?\n # We are only asking about Nominal scales now,\n # but presumably would apply to Ordinal too?\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 81, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 82, "name": "apply", "kind": "ref", "category": "function", "info": " return groupby.apply(data, self._eval, orient, bin_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 84, "name": "_eval", "kind": "def", "category": "function", "info": " def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weight = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n vals, **bin_kws, weights=weight, density=density,\n )\n\n width = np.diff(bin_edges)\n pos = bin_edges[:-1] + width / 2\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n return pd.DataFrame({orient: pos, other: hist, \"space\": width})\n\n def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return 
data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n # TODO better to do this as an isinstance check?\n # We are only asking about Nominal scales now,\n # but presumably would apply to Ordinal too?\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 100, "name": "_normalize", "kind": "def", "category": "function", "info": " def _normalize(self, data, orient):\n\n other = \"y\" if orient == \"x\" else \"x\"\n hist = data[other]\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{other: hist})\n\n def __call__(self, data, groupby, orient, scales):\n\n # TODO better to do this as an isinstance check?\n # We are only asking about Nominal scales now,\n # but presumably would apply to Ordinal too?\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [v for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n # TODO Make this an option?\n # (This needs to be tested if enabled, and maybe should be in _eval)\n # other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n # data = data[data[other] > 0]\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data, orient)\n else:\n if self.common_norm is False:\n norm_grouper = grouping_vars\n else:\n norm_grouper = self.common_norm\n normalize = partial(self._normalize, orient=orient)\n data = GroupBy(norm_grouper).apply(data, normalize)\n\n return data\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 106, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 108, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 110, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / data[\"space\"]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 118, "name": "assign", "kind": "ref", "category": "function", "info": " return data.assign(**{other: hist})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 128, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 129, "name": "apply", "kind": "ref", "category": "function", "info": " data = groupby.apply(data, self._eval, orient, bin_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 132, "name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = GroupBy(grouping_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 134, "name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = GroupBy(self.common_bins)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 135, "name": "apply", "kind": "ref", "category": "function", "info": " data = bin_groupby.apply(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 145, "name": "_normalize", "kind": "ref", "category": "function", "info": " data = self._normalize(data, orient)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 152, "name": "GroupBy", "kind": "ref", "category": "function", "info": " data = GroupBy(norm_grouper).apply(data, normalize)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/histogram.py", "rel_fname": "seaborn/_stats/histogram.py", "line": 152, "name": "apply", "kind": "ref", "category": "function", "info": " data = GroupBy(norm_grouper).apply(data, normalize)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 10, "name": "PolyFit", "kind": "def", "category": "class", "info": "_fit_predict\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 21, "name": "_fit_predict", "kind": "def", "category": "function", "info": " def _fit_predict(self, data):\n\n x = data[\"x\"]\n y = data[\"y\"]\n if x.nunique() <= 
self.order:\n # TODO warn?\n xx = yy = []\n else:\n p = np.polyfit(x, y, self.order)\n xx = np.linspace(x.min(), x.max(), self.gridsize)\n yy = np.polyval(p, xx)\n\n return pd.DataFrame(dict(x=xx, y=yy))\n\n # TODO we should have a way of identifying the method that will be applied\n # and then only define __call__ on a base-class of stats with this pattern\n\n def __call__(self, data, groupby, orient, scales):\n\n return groupby.apply(data, self._fit_predict)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 25, "name": "nunique", "kind": "ref", "category": "function", "info": " if x.nunique() <= self.order:\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 40, "name": "apply", "kind": "ref", "category": "function", "info": " return groupby.apply(data, self._fit_predict)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 44, "name": "OLSFit", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 6, "name": "bootstrap", "kind": "def", "category": "function", "info": "def bootstrap(*args, **kwargs):\n \"\"\"Resample one or more arrays with replacement and store aggregate values.\n\n Positional arguments are a sequence of arrays to bootstrap along the first\n axis and pass to a summary function.\n\n Keyword arguments:\n n_boot : int, default=10000\n Number of iterations\n axis : int, default=None\n Will pass axis to ``func`` as a keyword argument.\n units : array, default=None\n Array of sampling unit IDs. When used the bootstrap resamples units\n and then observations within units instead of individual\n datapoints.\n func : string or callable, default=\"mean\"\n Function to call on the args that are passed in. If string, uses as\n name of function in the numpy namespace. 
If nans are present in the\n        data, will try to use a nan-aware version of the named function.\n    seed : Generator | SeedSequence | RandomState | int | None\n        Seed for the random number generator; useful if you want\n        reproducible resamples.\n\n    Returns\n    -------\n    boot_dist: array\n        array of bootstrapped statistic values\n\n    \"\"\"\n    # Ensure the input arrays are all the same length\n    if len(np.unique(list(map(len, args)))) > 1:\n        raise ValueError(\"All input arrays must have the same length\")\n    n = len(args[0])\n\n    # Default keyword arguments\n    n_boot = kwargs.get(\"n_boot\", 10000)\n    func = kwargs.get(\"func\", \"mean\")\n    axis = kwargs.get(\"axis\", None)\n    units = kwargs.get(\"units\", None)\n    random_seed = kwargs.get(\"random_seed\", None)\n    if random_seed is not None:\n        msg = \"`random_seed` has been renamed to `seed` and will be removed\"\n        warnings.warn(msg)\n    seed = kwargs.get(\"seed\", random_seed)\n    if axis is None:\n        func_kwargs = dict()\n    else:\n        func_kwargs = dict(axis=axis)\n\n    # Initialize the resampler\n    rng = _handle_random_seed(seed)\n\n    # Coerce to arrays\n    args = list(map(np.asarray, args))\n    if units is not None:\n        units = np.asarray(units)\n\n    if isinstance(func, str):\n\n        # Allow named numpy functions\n        f = getattr(np, func)\n\n        # Try to use nan-aware version of function if necessary\n        missing_data = np.isnan(np.sum(np.column_stack(args)))\n\n        if missing_data and not func.startswith(\"nan\"):\n            nanf = getattr(np, f\"nan{func}\", None)\n            if nanf is None:\n                msg = f\"Data contain nans but no nan-aware version of `{func}` found\"\n                warnings.warn(msg, UserWarning)\n            else:\n                f = nanf\n\n    else:\n        f = func\n\n    # Handle numpy changes\n    try:\n        integers = rng.integers\n    except AttributeError:\n        integers = rng.randint\n\n    # Do the bootstrap\n    if units is not None:\n        return _structured_bootstrap(args, n_boot, units, f,\n                                     func_kwargs, integers)\n\n    boot_dist = []\n    for i in range(int(n_boot)):\n        resampler = integers(0, n, n, dtype=np.intp)  # intp is indexing dtype\n        sample = [a.take(resampler, axis=0) for a in args]\n        boot_dist.append(f(*sample, **func_kwargs))\n    return np.array(boot_dist)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 56, "name": "_handle_random_seed", "kind": "ref", "category": "function", "info": "    rng = _handle_random_seed(seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 90, "name": "_structured_bootstrap", "kind": "ref", "category": "function", "info": "        return _structured_bootstrap(args, n_boot, units, f,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 95, "name": "integers", "kind": "ref", "category": "function", "info": "        resampler = integers(0, n, n, dtype=np.intp)  # intp is indexing dtype\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 97, "name": "f", "kind": "ref", "category": "function", "info": "        boot_dist.append(f(*sample, **func_kwargs))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 101, "name": "_structured_bootstrap", "kind": "def", "category": "function", "info": "def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):\n    \"\"\"Resample units instead of 
datapoints.\"\"\"\n unique_units = np.unique(units)\n n_units = len(unique_units)\n\n args = [[a[units == unit] for unit in unique_units] for a in args]\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n_units, n_units, dtype=np.intp)\n sample = [[a[i] for i in resampler] for a in args]\n lengths = map(len, sample[0])\n resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]\n sample = list(map(np.concatenate, sample))\n boot_dist.append(func(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 110, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = integers(0, n_units, n_units, dtype=np.intp)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 113, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 116, "name": "func", "kind": "ref", "category": "function", "info": " boot_dist.append(func(*sample, **func_kwargs))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 120, "name": "_handle_random_seed", "kind": "def", "category": "function", "info": "def _handle_random_seed(seed=None):\n \"\"\"Given a seed in one of many formats, return a random number generator.\n\n Generalizes across the numpy 1.17 changes, preferring newer functionality.\n\n \"\"\"\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n try:\n # General interface for seeding on numpy >= 1.17\n rng = np.random.default_rng(seed)\n except AttributeError:\n # We are on numpy < 1.17, handle options ourselves\n if isinstance(seed, (numbers.Integral, np.integer)):\n rng = np.random.RandomState(seed)\n elif seed is None:\n rng = np.random.RandomState()\n else:\n err = \"{} cannot be used to seed the random number generator\"\n raise ValueError(err.format(seed))\n return rng\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 131, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 135, "name": "RandomState", "kind": "ref", "category": "function", "info": " rng = np.random.RandomState(seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 137, "name": "RandomState", "kind": "ref", "category": "function", "info": " rng = np.random.RandomState()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 26, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 31, 
"name": "_BaseGrid", "kind": "def", "category": "class", "info": "set\tfig\tfigure\tapply\tpipe\tsavefig"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 42, "name": "fig", "kind": "def", "category": "function", "info": " def fig(self):\n \"\"\"DEPRECATED: prefer the `figure` property.\"\"\"\n # Grid.figure is preferred because it matches the Axes attribute name.\n # But as the maintanace burden on having this property is minimal,\n # let's be slow about formally deprecating it. For now just note its deprecation\n # in the docstring; add a warning in version 0.13, and eventually remove it.\n return self._figure\n\n @property\n def figure(self):\n \"\"\"Access the :class:`matplotlib.figure.Figure` object underlying the grid.\"\"\"\n return self._figure\n\n def apply(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n \"\"\"\n func(self, *args, **kwargs)\n return self\n\n def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 55, "name": "apply", "kind": "def", "category": "function", "info": " def apply(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n \"\"\"\n func(self, *args, **kwargs)\n return self\n\n def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. 
Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 67, "name": "func", "kind": "ref", "category": "function", "info": " func(self, *args, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 70, "name": "pipe", "kind": "def", "category": "function", "info": " def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 82, "name": "func", "kind": "ref", "category": "function", "info": " return func(self, *args, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 97, "name": "Grid", "kind": "def", "category": "class", "info": "__init__\ttight_layout\tadd_legend\t_update_legend_data\t_get_palette\tlegend\ttick_params"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 120, "name": "add_legend", "kind": "def", "category": "function", "info": " def add_legend(self, legend_data=None, title=None, label_order=None,\n adjust_subtitles=False, **kwargs):\n \"\"\"Draw a legend, maybe placing it outside axes and resizing the figure.\n\n Parameters\n ----------\n legend_data : dict\n Dictionary mapping label names (or two-element tuples where the\n second element is a label name) to matplotlib artist handles. The\n default reads from ``self._legend_data``.\n title : string\n Title for the legend. The default reads from ``self._hue_var``.\n label_order : list of labels\n The order that the legend entries should appear in. 
The default\n reads from ``self.hue_names``.\n adjust_subtitles : bool\n If True, modify entries with invisible artists to left-align\n the labels and set the font size to that of a title.\n kwargs : key, value pairings\n Other keyword arguments are passed to the underlying legend methods\n on the Figure or Axes object.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n # Find the data for the legend\n if legend_data is None:\n legend_data = self._legend_data\n if label_order is None:\n if self.hue_names is None:\n label_order = list(legend_data.keys())\n else:\n label_order = list(map(utils.to_utf8, self.hue_names))\n\n blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n handles = [legend_data.get(l, blank_handle) for l in label_order]\n title = self._hue_var if title is None else title\n title_size = mpl.rcParams[\"legend.title_fontsize\"]\n\n # Unpack nested labels from a hierarchical legend\n labels = []\n for entry in label_order:\n if isinstance(entry, tuple):\n _, label = entry\n else:\n label = entry\n labels.append(label)\n\n # Set default legend kwargs\n kwargs.setdefault(\"scatterpoints\", 1)\n\n if self._legend_out:\n\n kwargs.setdefault(\"frameon\", False)\n kwargs.setdefault(\"loc\", \"center right\")\n\n # Draw a full-figure legend outside the grid\n figlegend = self._figure.legend(handles, labels, **kwargs)\n\n self._legend = figlegend\n figlegend.set_title(title, prop={\"size\": title_size})\n\n if adjust_subtitles:\n adjust_legend_subtitles(figlegend)\n\n # Draw the plot to set the bounding boxes correctly\n _draw_figure(self._figure)\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n fig_width, fig_height = self._figure.get_size_inches()\n self._figure.set_size_inches(fig_width + legend_width, fig_height)\n\n # Draw the plot again to get the new transformations\n _draw_figure(self._figure)\n\n # Now calculate how much space we need on the right side\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n space_needed = legend_width / (fig_width + legend_width)\n margin = .04 if self._margin_titles else .01\n self._space_needed = margin + space_needed\n right = 1 - self._space_needed\n\n # Place the subplot axes to give space for the legend\n self._figure.subplots_adjust(right=right)\n self._tight_layout_rect[2] = right\n\n else:\n # Draw a legend in the first axis\n ax = self.axes.flat[0]\n kwargs.setdefault(\"loc\", \"best\")\n\n leg = ax.legend(handles, labels, **kwargs)\n leg.set_title(title, prop={\"size\": title_size})\n self._legend = leg\n\n if adjust_subtitles:\n adjust_legend_subtitles(leg)\n\n return self\n\n def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = ax.legend_.legendHandles\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n 
else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 157, "name": "Patch", "kind": "ref", "category": "function", "info": " blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 183, "name": "set_title", "kind": "ref", "category": "function", "info": " figlegend.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 186, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(figlegend)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 189, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 192, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 193, "name": "get_size_inches", "kind": "ref", "category": "function", "info": " fig_width, fig_height = self._figure.get_size_inches()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 194, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " self._figure.set_size_inches(fig_width + legend_width, fig_height)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 197, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 200, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 216, "name": "set_title", "kind": "ref", "category": "function", "info": " leg.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 220, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(leg)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 224, "name": "_update_legend_data", "kind": "def", "category": "function", "info": " def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = ax.legend_.legendHandles\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 232, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in ax.legend_.texts]\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 235, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, labels = ax.get_legend_handles_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 243, "name": "_get_palette", "kind": "def", "category": "function", "info": " def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 246, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(n_colors=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 249, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 254, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 256, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 258, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 263, "name": "color_palette", "kind": "ref", "category": "function", "info": " 
colors = color_palette(color_names, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 267, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 269, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(colors, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 362, "name": "FacetGrid", "kind": "def", "category": "class", "info": "__init__\tfacet_data\tmap\tmap_dataframe\t_facet_color\t_facet_plot\t_finalize_grid\tfacet_axis\tdespine\tset_axis_labels\tset_xlabels\tset_ylabels\tset_xticklabels\tset_yticklabels\tset_titles\trefline\taxes\tax\taxes_dict\t_inner_axes\t_left_axes\t_not_left_axes\t_bottom_axes\t_not_bottom_axes"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 382, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 384, "name": "_get_palette", "kind": "ref", "category": "function", "info": " colors = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 390, "name": "categorical_order", "kind": "ref", "category": "function", "info": " row_names = categorical_order(data[row], row_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 395, "name": "categorical_order", "kind": "ref", "category": "function", "info": " col_names = categorical_order(data[col], col_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 445, "name": "_disable_autolayout", "kind": "ref", "category": "function", "info": " with _disable_autolayout():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 475, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 481, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 524, "name": "set_titles", "kind": "ref", "category": "function", "info": " self.set_titles()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 528, "name": "despine", "kind": "ref", "category": "function", "info": " self.despine()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 
532, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 533, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 534, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 535, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 539, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 540, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 541, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 542, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 636, "name": "facet_data", "kind": "def", "category": "function", "info": " def facet_data(self):\n \"\"\"Generator for name indices and data subsets for each facet.\n\n Yields\n ------\n (i, j, k), data_ijk : tuple of ints, DataFrame\n The ints provide an index into the {row, col, hue}_names attribute,\n and the dataframe contains a subset of the full data corresponding\n to each facet. The generator yields subsets that correspond with\n the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`\n is None.\n\n \"\"\"\n data = self.data\n\n # Construct masks for the row variable\n if self.row_names:\n row_masks = [data[self._row_var] == n for n in self.row_names]\n else:\n row_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the column variable\n if self.col_names:\n col_masks = [data[self._col_var] == n for n in self.col_names]\n else:\n col_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the hue variable\n if self.hue_names:\n hue_masks = [data[self._hue_var] == n for n in self.hue_names]\n else:\n hue_masks = [np.repeat(True, len(self.data))]\n\n # Here is the main generator loop\n for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),\n enumerate(col_masks),\n enumerate(hue_masks)):\n data_ijk = data[row & col & hue & self._not_na]\n yield (i, j, k), data_ijk\n\n def map(self, func, *args, **kwargs):\n \"\"\"Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. 
It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # How we use the function depends on where it comes from\n func_module = str(getattr(func, \"__module__\", \"\"))\n\n # Check for categorical plots without order information\n if func_module == \"seaborn.categorical\":\n if \"order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n if len(args) == 3 and \"hue_order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`hue_order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not func_module.startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n\n # Get the actual data we are going to plot with\n plot_data = data_ijk[list(args)]\n if self._dropna:\n plot_data = plot_data.dropna()\n plot_args = [v for k, v in plot_data.iteritems()]\n\n # Some matplotlib functions don't handle pandas objects correctly\n if func_module.startswith(\"matplotlib\"):\n plot_args = [v.values for v in plot_args]\n\n # Draw the plot\n self._facet_plot(func, ax, plot_args, kwargs)\n\n # Finalize the annotations and layout\n self._finalize_grid(args[:2])\n\n return self\n\n def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n\n def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n 
return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color <matplotlib.colors>`\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
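Editorial note: the `map` and `map_dataframe` records above document two calling conventions for drawing onto a `FacetGrid`. A minimal usage sketch (not part of the tagged source; it assumes seaborn is installed and uses its bundled "tips" demo dataset):

```python
import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")  # bundled demo dataset (assumed available)

# FacetGrid.map passes each facet's columns positionally; the callable must
# draw on the currently active Axes and accept a `color` keyword (plus
# `label` when a hue variable is assigned).
g = sns.FacetGrid(tips, col="time", hue="smoker")
g.map(plt.scatter, "total_bill", "tip")
g.add_legend()

# map_dataframe instead passes the facet subset as a `data` kwarg and the
# variables as string names, for functions that understand long-form frames.
g2 = sns.FacetGrid(tips, col="time")
g2.map_dataframe(sns.scatterplot, x="total_bill", y="tip")
g2.savefig("facets.png")  # savefig defaults to bbox_inches="tight"
```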
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 719, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 727, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 730, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 738, "name": "to_utf8", "kind": "ref", "category": "function", "info": " kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 743, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 744, "name": "iteritems", "kind": "ref", "category": "function", "info": " plot_args = [v for k, v in plot_data.iteritems()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 751, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, plot_args, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 754, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(args[:2])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 758, "name": "map_dataframe", "kind": "def", "category": "function", "info": " def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n\n def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n 
return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color <matplotlib.colors>`\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
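Editorial note: the `set_titles` and `refline` entries repeated in this record describe templated facet titles and per-facet reference lines. A short sketch under the same assumptions as above (dataset and values are illustrative only):

```python
import seaborn as sns

tips = sns.load_dataset("tips")

g = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
g.map_dataframe(sns.histplot, x="total_bill")

# Templates are filled from {row_var}/{row_name} and {col_var}/{col_name}.
g.set_titles(row_template="{row_name}", col_template="{col_name}")

# refline forwards to Axes.axvline (for x) or Axes.axhline (for y) on every
# facet, drawing a dashed grey line by default.
g.refline(x=tips["total_bill"].median())
```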
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 791, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 799, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 802, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 814, "name": "dropna", "kind": "ref", "category": "function", "info": " data_ijk = data_ijk.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 818, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, args, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 825, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(axis_labels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 829, "name": "_facet_color", "kind": "def", "category": "function", "info": " def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis 
on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color <matplotlib.colors>`\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
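The record above embeds FacetGrid's titling, reference-line, and axes-access API. As a quick usage sketch (assumes seaborn >= 0.11.2 for refline and the bundled "tips" demo dataset; none of these names come from this file):

import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
g.map(plt.scatter, "total_bill", "tip")
# With margin_titles=True, the row/col templates are applied separately
g.set_titles(row_template="{row_name}", col_template="{col_name}")
# Dashed grey reference line on every facet at the median tip
g.refline(y=tips["tip"].median())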
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 837, "name": "_facet_plot", "kind": "def", "category": "function", "info": " def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} 
and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
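As the _facet_plot source above shows, seaborn reroutes positional variables onto named semantics (x, y, hue, size, style) before calling an axis-level function with an explicit ax. A self-contained sketch of that routing logic (the helper name is hypothetical, not part of the API):

def _route_semantics(func, plot_args, plot_kwargs):
    # Mirrors the zip in _facet_plot: the first positional becomes x,
    # then y, hue, size, style; the positional list is consumed.
    if str(func.__module__).startswith("seaborn"):
        plot_kwargs = plot_kwargs.copy()
        for key, val in zip(["x", "y", "hue", "size", "style"], plot_args):
            plot_kwargs[key] = val
        plot_args = []
    return plot_args, plot_kwargs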
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 847, "name": "func", "kind": "ref", "category": "function", "info": " func(*plot_args, **plot_kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 850, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 852, "name": "_finalize_grid", "kind": "def", "category": "function", "info": " def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template 
for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
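_finalize_grid amounts to set_axis_labels plus tight_layout, so the same cleanup is available through the public API when drawing on facets by hand (sketch, reusing the grid g assumed above):

for (row_val, col_val), ax in g.axes_dict.items():
    ax.text(.5, .5, f"{row_val} / {col_val}", transform=ax.transAxes)
g.set_axis_labels("total_bill", "tip")
g.tight_layout()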
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 854, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " self.set_axis_labels(*axlabels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 857, "name": "facet_axis", "kind": "def", "category": "function", "info": " def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
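Note in facet_axis that a wrapped grid indexes the flat axes array and ignores row_i, so callers pass the running facet index as col_j. Sketch (assumes the tips dataset loaded above):

g_wrap = sns.FacetGrid(tips, col="day", col_wrap=2)
ax = g_wrap.facet_axis(0, 3)  # fourth facet; the row index is unused here
ax.set_title("fourth facet")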
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 871, "name": "despine", "kind": "def", "category": "function", "info": " def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
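FacetGrid.despine forwards its keyword arguments to seaborn's figure-level despine, so any spine can be removed grid-wide in one call:

g.despine(left=True, bottom=True)  # drop the left and bottom spines as well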
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 873, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(self._figure, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 876, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
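Because set_axis_labels returns self and clears inner labels by default, it chains naturally with the other setters (sketch):

g.set_axis_labels("Total bill ($)", "Tip ($)").set_titles("{col_name}")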
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 880, "name": "set_xlabels", "kind": "ref", "category": "function", "info": " self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 883, "name": "set_ylabels", "kind": "ref", "category": "function", "info": " self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 887, "name": "set_xlabels", "kind": "def", "category": "function", "info": " def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
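Note on the set_xlabels/set_ylabels records above: labels are applied only to the bottom row and left column of the grid, and clear_inner=True wipes them from interior facets. A minimal usage sketch, assuming the standard "tips" example dataset and its column names (not taken from this file):

import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")  # assumed example data
g = sns.FacetGrid(tips, row="sex", col="time")
g.map(plt.scatter, "total_bill", "tip")
# Outer axes get the labels; clear_inner=True blanks the interior ones.
g.set_xlabels("Total bill ($)", clear_inner=True)
g.set_ylabels("Tip ($)")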
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 892, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 895, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(\"\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 898, "name": "set_ylabels", "kind": "def", "category": "function", "info": " def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
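The set_xticklabels record above thins tick labels when step is given and labels is None: both the ticks and the current label texts are sliced with [::step]. A short sketch under the same assumed dataset; extra kwargs such as rotation pass through to matplotlib's set_xticklabels:

import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")  # assumed example data
g = sns.FacetGrid(tips, col="day")
g.map(plt.hist, "total_bill")
g.set_xticklabels(step=2, rotation=45)  # keep every 2nd tick label, rotated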
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 903, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 906, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(\"\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 909, "name": "set_xticklabels", "kind": "def", "category": "function", "info": " def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
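The set_titles logic indexed above recognizes only the {row_var}/{row_name} and {col_var}/{col_name} formatting keys, using the per-dimension templates when margin_titles is enabled and the combined template otherwise. A sketch with assumed data:

import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")  # assumed example data
g = sns.FacetGrid(tips, row="sex", col="smoker", margin_titles=True)
g.map(plt.scatter, "total_bill", "tip")
# Margin titles: rows annotated on the right edge, columns as axes titles.
g.set_titles(row_template="{row_name}", col_template="smoker = {col_name}")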
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 912, "name": "get_xticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_xticks()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 913, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(curr_ticks)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 915, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 915, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 917, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = ax.get_xticks()[::step]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 919, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(xticks)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 920, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 922, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(labels, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 925, "name": "set_yticklabels", "kind": "def", "category": "function", "info": " def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
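Per the refline record above, x is mapped through plt.axvline and y through plt.axhline on every facet, with color '.5' and a dashed linestyle unless overridden. Sketch (assumed data):

import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")  # assumed example data
g = sns.FacetGrid(tips, col="time")
g.map(plt.scatter, "total_bill", "tip")
g.refline(x=20)             # gray dashed vertical line on each facet
g.refline(y=3, color="r")   # horizontal line, overriding the default color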
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 928, "name": "get_yticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_yticks()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 929, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(curr_ticks)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 932, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 934, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(labels, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 937, "name": "set_titles", "kind": "def", "category": "function", "info": " def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
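A worked check of the _bottom_axes arithmetic in the col_wrap branch above (plain Python, no plotting): with 7 facets wrapped at 3 columns, nrow is 3 and two grid cells are empty, so the "bottom" axes are the last row plus the facets sitting directly above the empty slots.

ncol, nrow, n_facets = 3, 3, 7
n_empty = nrow * ncol - n_facets  # 2 unused cells in the last row
bottom = [
    i for i in range(n_facets)
    if i >= ncol * (nrow - 1) or i >= ncol * (nrow - 1) - n_empty
]
print(bottom)  # [4, 5, 6]: facet 6 is in the last row; 4 and 5 sit over gaps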
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 976, "name": "to_utf8", "kind": "ref", "category": "function", "info": " row_template = utils.to_utf8(row_template)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 977, "name": "to_utf8", "kind": "ref", "category": "function", "info": " col_template = utils.to_utf8(col_template)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 978, "name": "to_utf8", "kind": "ref", "category": "function", "info": " template = utils.to_utf8(template)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1005, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[0, j].set_title(title, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1015, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, j].set_title(title, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1020, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, 0].set_title(title, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1026, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes.flat[i].set_title(title, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1029, "name": "refline", "kind": "def", "category": "function", "info": " def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
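The ax property defined above is deliberately strict: it resolves only for a (1, 1) grid and otherwise raises AttributeError, steering callers to the .axes array. Minimal sketch (assumed data):

import seaborn as sns

tips = sns.load_dataset("tips")   # assumed example data
g = sns.FacetGrid(tips)           # no faceting variables -> shape (1, 1)
single = g.ax                     # the lone Axes
g2 = sns.FacetGrid(tips, col="day")
grid_axes = g2.axes               # use the array when facets are assigned
# g2.ax would raise AttributeError here, per the property above.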
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1071, "name": "ax", "kind": "def", "category": "function", "info": " def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1082, "name": "axes_dict", "kind": "def", "category": "function", "info": " def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. 
If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1095, "name": "_inner_axes", "kind": "def", "category": "function", "info": " def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if 
self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1113, "name": "_left_axes", "kind": "def", "category": "function", "info": " def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1125, "name": "_not_left_axes", "kind": "def", "category": "function", "info": " def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat 
array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1137, "name": "_bottom_axes", "kind": "def", "category": "function", "info": " def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1154, "name": "_not_bottom_axes", "kind": "def", "category": "function", "info": " def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1171, "name": "PairGrid", "kind": "def", "category": "class", "info": "__init__\tmap\tmap_lower\tmap_upper\tmap_offdiag\tmap_diag\t_map_diag_iter_hue\t_map_bivariate\t_plot_bivariate\t_plot_bivariate_iter_hue\t_add_axis_labels\t_find_numeric_cols"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1244, "name": "_find_numeric_cols", "kind": "ref", "category": "function", "info": " numeric_cols = self._find_numeric_cols(data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1272, "name": "_disable_autolayout", "kind": "ref", "category": "function", "info": " with _disable_autolayout():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1303, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1320, "name": "categorical_order", "kind": "ref", "category": "function", 
"info": " hue_names = hue_order = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1332, "name": "_get_palette", "kind": "ref", "category": "function", "info": " self.palette = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1339, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1340, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1341, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1342, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1347, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1348, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1349, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1350, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1356, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(fig=fig)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1372, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1376, "name": "map_lower", "kind": "def", "category": "function", "info": " def map_lower(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the lower diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.tril_indices_from(self.axes, -1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n 
ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n 
data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1388, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1391, "name": "map_upper", "kind": "def", "category": "function", "info": " def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. 
This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1403, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1406, "name": "map_offdiag", "kind": 
"def", "category": "function", "info": " def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n 
try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n 
ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1418, "name": "map_lower", "kind": "ref", "category": "function", "info": " self.map_lower(func, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1420, "name": "map_upper", "kind": "ref", "category": "function", "info": " self.map_upper(func, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1427, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1430, "name": "map_diag", "kind": "def", "category": "function", "info": " def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put 
marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in 
self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1453, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " diag_ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1460, "name": "set_visible", "kind": "ref", "category": "function", "info": " tick.tick1line.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1464, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1466, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1472, "name": "share_axis", "kind": "ref", "category": "function", "info": " share_axis(diag_axes[0], ax, \"y\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1478, "name": "_map_diag_iter_hue", "kind": "ref", "category": "function", "info": " return self._map_diag_iter_hue(func, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1506, "name": "func", "kind": "ref", "category": "function", "info": " func(x=vector, **plot_kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1509, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1512, "name": "_map_diag_iter_hue", "kind": "def", "category": "function", "info": " def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = 
hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def 
_find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1518, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data[var].groupby(self.hue_vals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1530, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1540, "name": "remove_na", "kind": "ref", "category": "function", "info": " data_k = utils.remove_na(data_k)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1543, "name": "func", "kind": "ref", "category": "function", "info": " func(x=data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1545, "name": "func", "kind": "ref", "category": "function", "info": " func(data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1547, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1551, "name": "_map_bivariate", "kind": "def", "category": "function", "info": " def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. 
we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1567, "name": "_plot_bivariate", "kind": "ref", "category": "function", "info": " self._plot_bivariate(x_var, y_var, ax, func, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1568, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1573, "name": "_plot_bivariate", "kind": "def", "category": "function", "info": " def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n 
if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1576, "name": "_plot_bivariate_iter_hue", "kind": "ref", "category": "function", "info": " self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1595, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1608, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1610, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1612, "name": 
"_plot_bivariate_iter_hue", "kind": "def", "category": "function", "info": " def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1625, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data.groupby(self.hue_vals)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1632, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1638, "name": "dropna", "kind": "ref", "category": "function", "info": " data_k = data_k[axes_vars].dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1650, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1652, "name": "func", "kind": "ref", "category": "function", "info": " func(x, y, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1654, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1656, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n 
ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1659, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1661, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1663, "name": "_find_numeric_cols", "kind": "def", "category": "function", "info": " def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1667, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1672, "name": "JointGrid", "kind": "def", "category": "class", "info": "__init__\t_inject_kwargs\tplot\tplot_joint\tplot_marginals\trefline\tset_axis_labels"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1692, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_joint = f.add_subplot(gs[1:, :-1])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1693, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1694, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1702, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1703, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1704, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1705, "name": "get_yticklabels", "kind": 
"ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1709, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1710, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1711, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1712, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1713, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1714, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1715, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1716, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1721, "name": "VectorPlotter", "kind": "ref", "category": "function", "info": " p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1726, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1728, "name": "get_var", "kind": "def", "category": "function", "info": " def get_var(var):\n vector = plot_data.get(var, None)\n if vector is not None:\n vector = vector.rename(p.variables.get(var, None))\n return vector\n\n self.x = get_var(\"x\")\n self.y = get_var(\"y\")\n self.hue = get_var(\"hue\")\n\n for axis in \"xy\":\n name = p.variables.get(axis, None)\n if name is not None:\n getattr(ax_joint, f\"set_{axis}label\")(name)\n\n if xlim is not None:\n ax_joint.set_xlim(xlim)\n if ylim is not None:\n 
ax_joint.set_ylim(ylim)\n\n # Store the semantic mapping parameters for axes-level functions\n self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)\n\n # Make the grid look nice\n utils.despine(f)\n if not marginal_ticks:\n utils.despine(ax=ax_marg_x, left=True)\n utils.despine(ax=ax_marg_y, bottom=True)\n for axes in [ax_marg_x, ax_marg_y]:\n for axis in [axes.xaxis, axes.yaxis]:\n axis.label.set_visible(False)\n f.tight_layout()\n f.subplots_adjust(hspace=space, wspace=space)\n\n def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1731, "name": "rename", "kind": "ref", "category": 
"function", "info": " vector = vector.rename(p.variables.get(var, None))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1734, "name": "get_var", "kind": "ref", "category": "function", "info": " self.x = get_var(\"x\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1735, "name": "get_var", "kind": "ref", "category": "function", "info": " self.y = get_var(\"y\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1736, "name": "get_var", "kind": "ref", "category": "function", "info": " self.hue = get_var(\"hue\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1744, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax_joint.set_xlim(xlim)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1746, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax_joint.set_ylim(ylim)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1752, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(f)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1754, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_x, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1755, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_y, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1758, "name": "set_visible", "kind": "ref", "category": "function", "info": " axis.label.set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1762, "name": "_inject_kwargs", "kind": "def", "category": "function", "info": " def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. 
See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1791, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " self.plot_marginals(marginal_func, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1792, "name": "plot_joint", "kind": "ref", "category": "function", "info": " self.plot_joint(joint_func, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1795, "name": "plot_joint", "kind": "def", "category": "function", "info": " def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. 
Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1822, "name": "_inject_kwargs", "kind": "ref", "category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1825, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, y=self.y, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1827, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, self.y, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1831, "name": "plot_marginals", "kind": "def", "category": "function", "info": " def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1861, "name": "_inject_kwargs", "kind": "ref", 
"category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1876, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, ax=self.ax_marg_x, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1879, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, **orient_kw_x, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1882, "name": "func", "kind": "ref", "category": "function", "info": " func(y=self.y, ax=self.ax_marg_y, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1885, "name": "func", "kind": "ref", "category": "function", "info": " func(self.y, **orient_kw_y, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1887, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1887, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1888, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1888, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1892, "name": "refline", "kind": "def", "category": "function", "info": " def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", 
ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1936, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1955, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1956, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2004, "name": "pairplot", "kind": "def", "category": "function", "info": "def pairplot(\n data, *,\n hue=None, hue_order=None, palette=None,\n vars=None, x_vars=None, y_vars=None,\n kind=\"scatter\", diag_kind=\"auto\", markers=None,\n height=2.5, aspect=1, corner=False, dropna=False,\n plot_kws=None, diag_kws=None, grid_kws=None, size=None,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2113, "name": "PairGrid", "kind": "ref", "category": "function", "info": " grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2143, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(histplot, **diag_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2147, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(kdeplot, **diag_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2157, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(scatterplot, **plot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", 
"line": 2160, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(regplot, **plot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2164, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(kdeplot, **plot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2167, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(histplot, **plot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2171, "name": "add_legend", "kind": "ref", "category": "function", "info": " grid.add_legend()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2178, "name": "jointplot", "kind": "def", "category": "function", "info": "def jointplot(\n data=None, *, x=None, y=None, hue=None, kind=\"scatter\",\n height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,\n color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,\n joint_kws=None, marginal_kws=None,\n **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2217, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", plot_kinds, kind)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2230, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color_rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2231, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " colors = [utils.set_hls_values(color_rgb, l=l) # noqa\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2233, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(colors, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2240, "name": "JointGrid", "kind": "ref", "category": "function", "info": " grid = JointGrid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2254, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(scatterplot, **joint_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2264, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(marg_func, **marginal_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2272, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(histplot, **joint_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2287, "name": "histplot", "kind": 
"ref", "category": "function", "info": " histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2288, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2294, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(kdeplot, **joint_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2300, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(kdeplot, **marginal_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2304, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " x_bins = min(_freedman_diaconis_bins(grid.x), 50)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2305, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " y_bins = min(_freedman_diaconis_bins(grid.y), 50)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2310, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(plt.hexbin, **joint_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2314, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2320, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2323, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(regplot, **joint_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2328, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(residplot, **joint_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2330, "name": "get_offsets", "kind": "ref", "category": "function", "info": " x, y = grid.ax_joint.collections[0].get_offsets().T\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2332, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2333, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(y=y, hue=hue, ax=grid.ax_marg_y, 
**marginal_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 43, "name": "_CategoricalPlotterNew", "kind": "def", "category": "class", "info": "__init__\t_hue_backcompat\t_palette_without_hue_backcompat\tcat_axis\t_get_gray\t_adjust_cat_axis\t_native_width\t_nested_offsets\tplot_strips\tplot_swarms"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 77, "name": "rename", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.rename(columns={\"x\": \"y\", \"y\": \"x\"})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 95, "name": "infer_orient", "kind": "ref", "category": "function", "info": " self.orient = infer_orient(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 117, "name": "categorical_order", "kind": "ref", "category": "function", "info": " cat_levels = categorical_order(self.plot_data[self.cat_axis], order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 120, "name": "_hue_backcompat", "kind": "def", "category": "function", "info": " def _hue_backcompat(self, color, palette, hue_order, force_hue=False):\n \"\"\"Implement backwards compatibility for hue parametrization.\n\n Note: the force_hue parameter is used so that functions can be shown to\n pass existing tests during refactoring and then tested for new behavior.\n It can be removed after completion of the work.\n\n \"\"\"\n # The original categorical functions applied a palette to the categorical axis\n # by default. We want to require an explicit hue mapping, to be more consistent\n # with how things work elsewhere now. I don't think there's any good way to\n # do this gently -- because it's triggered by the default value of hue=None,\n # users would always get a warning, unless we introduce some sentinel \"default\"\n # argument for this change. 
That's possible, but asking users to set `hue=None`\n # on every call is annoying.\n # We are keeping the logic for implementing the old behavior in with the current\n # system so that (a) we can punt on that decision and (b) we can ensure that\n # refactored code passes old tests.\n default_behavior = color is None or palette is not None\n if force_hue and \"hue\" not in self.variables and default_behavior:\n self._redundant_hue = True\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables[self.cat_axis]\n self.var_types[\"hue\"] = \"categorical\"\n hue_order = self.var_levels[self.cat_axis]\n\n # Because we convert the categorical axis variable to string,\n # we need to update a dictionary palette too\n if isinstance(palette, dict):\n palette = {str(k): v for k, v in palette.items()}\n\n else:\n self._redundant_hue = False\n\n # Previously, categorical plots had a trick where color= could seed the palette.\n # Because that's an explicit parameterization, we are going to give it one\n # release cycle with a warning before removing.\n if \"hue\" in self.variables and palette is None and color is not None:\n if not isinstance(color, str):\n color = mpl.colors.to_hex(color)\n palette = f\"dark:{color}\"\n msg = (\n \"Setting a gradient palette using color= is deprecated and will be \"\n f\"removed in version 0.13. Set `palette='{palette}'` for same effect.\"\n )\n warnings.warn(msg, FutureWarning)\n\n return palette, hue_order\n\n def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = \"Passing `palette` without assigning `hue` is deprecated.\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables.get(self.cat_axis)\n self.var_types[\"hue\"] = self.var_types.get(self.cat_axis)\n hue_order = self.var_levels.get(self.cat_axis)\n return hue_order\n\n @property\n def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n 
allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 159, "name": "to_hex", "kind": "ref", "category": "function", "info": " color = mpl.colors.to_hex(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 169, "name": "_palette_without_hue_backcompat", "kind": "def", "category": "function", "info": " def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = \"Passing `palette` without assigning `hue` is deprecated.\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables.get(self.cat_axis)\n self.var_types[\"hue\"] = self.var_types.get(self.cat_axis)\n hue_order = self.var_levels.get(self.cat_axis)\n return hue_order\n\n @property\n def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n 
return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = 
ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 182, "name": "cat_axis", "kind": "def", "category": "function", "info": " def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n 
allow_empty=True):\n\n            if offsets is not None:\n                dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n            if not sub_data.empty:\n                sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n            for var in \"xy\":\n                if self._log_scaled(var):\n                    sub_data[var] = np.power(10, sub_data[var])\n\n            ax = self._get_axes(sub_vars)\n            points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n            if \"hue\" in self.variables:\n                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n            if not sub_data.empty:\n                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n        beeswarm = Beeswarm(\n            width=width, orient=self.orient, warn_thresh=warn_thresh,\n        )\n        for (ax, center), points in point_collections.items():\n            if points.get_offsets().shape[0] > 1:\n\n                def draw(points, renderer, *, center=center):\n\n                    beeswarm(points, center)\n\n                    if self.orient == \"h\":\n                        scalex = False\n                        scaley = ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. It may be better\n                    # to disable autoscaling there to avoid needing to do this.\n                    fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n                    ax.update_datalim(points.get_datalim(ax.transData))\n                    if not fixed_scale and (scalex or scaley):\n                        ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n                    super(points.__class__, points).draw(renderer)\n\n                points.draw = draw.__get__(points)\n\n        _draw_figure(ax.figure)\n\n        # Finalize the axes details\n        if self.legend == \"auto\":\n            show_legend = not self._redundant_hue and self.input_format != \"wide\"\n        else:\n            show_legend = bool(self.legend)\n\n        if show_legend:\n            self.add_legend_data(ax)\n            handles, _ = ax.get_legend_handles_labels()\n            if handles:\n                ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 185, "name": "_get_gray", "kind": "def", "category": "function", "info": "    def _get_gray(self, colors):\n        \"\"\"Get a grayscale value that looks good with color.\"\"\"\n        if not len(colors):\n            return None\n        unique_colors = np.unique(colors, axis=0)\n        light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n        lum = min(light_vals) * .6\n        return (lum, lum, lum)\n\n    def _adjust_cat_axis(self, ax, axis):\n        \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n        # Note: in theory, this could happen in _attach for all categorical axes\n        # But two reasons not to do that:\n        # - If it happens before plotting, autoscaling messes up the plot limits\n        # - It would change existing plots from other seaborn functions\n        if self.var_types[axis] != \"categorical\":\n            return\n\n        # If both x/y data are empty, the correct way to set up the plot is\n        # somewhat undefined; because we don't add null category data to the plot in\n        # this case we don't *have* a categorical axis (yet), so best to just bail.\n        if self.plot_data[axis].empty:\n            return\n\n        # We can infer the total number of categories (including those from previous\n        # plots that are not part of the plot we are currently making) from the number\n        # of ticks, which matplotlib sets up while doing unit conversion. 
This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = 
{}\n        dodge_move = 0\n\n        for sub_vars, sub_data in self.iter_data(iter_vars,\n                                                 from_comp_data=True,\n                                                 allow_empty=True):\n\n            if offsets is not None:\n                dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n            if not sub_data.empty:\n                sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n            for var in \"xy\":\n                if self._log_scaled(var):\n                    sub_data[var] = np.power(10, sub_data[var])\n\n            ax = self._get_axes(sub_vars)\n            points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n            if \"hue\" in self.variables:\n                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n            if not sub_data.empty:\n                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n        beeswarm = Beeswarm(\n            width=width, orient=self.orient, warn_thresh=warn_thresh,\n        )\n        for (ax, center), points in point_collections.items():\n            if points.get_offsets().shape[0] > 1:\n\n                def draw(points, renderer, *, center=center):\n\n                    beeswarm(points, center)\n\n                    if self.orient == \"h\":\n                        scalex = False\n                        scaley = ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. It may be better\n                    # to disable autoscaling there to avoid needing to do this.\n                    fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n                    ax.update_datalim(points.get_datalim(ax.transData))\n                    if not fixed_scale and (scalex or scaley):\n                        ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n                    super(points.__class__, points).draw(renderer)\n\n                points.draw = draw.__get__(points)\n\n        _draw_figure(ax.figure)\n\n        # Finalize the axes details\n        if self.legend == \"auto\":\n            show_legend = not self._redundant_hue and self.input_format != \"wide\"\n        else:\n            show_legend = bool(self.legend)\n\n        if show_legend:\n            self.add_legend_data(ax)\n            handles, _ = ax.get_legend_handles_labels()\n            if handles:\n                ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 194, "name": "_adjust_cat_axis", "kind": "def", "category": "function", "info": "    def _adjust_cat_axis(self, ax, axis):\n        \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n        # Note: in theory, this could happen in _attach for all categorical axes\n        # But two reasons not to do that:\n        # - If it happens before plotting, autoscaling messes up the plot limits\n        # - It would change existing plots from other seaborn functions\n        if self.var_types[axis] != \"categorical\":\n            return\n\n        # If both x/y data are empty, the correct way to set up the plot is\n        # somewhat undefined; because we don't add null category data to the plot in\n        # this case we don't *have* a categorical axis (yet), so best to just bail.\n        if self.plot_data[axis].empty:\n            return\n\n        # We can infer the total number of categories (including those from previous\n        # plots that are not part of the plot we are currently making) from the number\n        # of ticks, which matplotlib sets up while doing unit conversion. This feels\n        # slightly risky, as if we are relying on something that may be a matplotlib\n        # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n 
allow_empty=True):\n\n            if offsets is not None:\n                dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n            if not sub_data.empty:\n                sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n            for var in \"xy\":\n                if self._log_scaled(var):\n                    sub_data[var] = np.power(10, sub_data[var])\n\n            ax = self._get_axes(sub_vars)\n            points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n            if \"hue\" in self.variables:\n                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n            if not sub_data.empty:\n                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n        beeswarm = Beeswarm(\n            width=width, orient=self.orient, warn_thresh=warn_thresh,\n        )\n        for (ax, center), points in point_collections.items():\n            if points.get_offsets().shape[0] > 1:\n\n                def draw(points, renderer, *, center=center):\n\n                    beeswarm(points, center)\n\n                    if self.orient == \"h\":\n                        scalex = False\n                        scaley = ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. It may be better\n                    # to disable autoscaling there to avoid needing to do this.\n                    fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n                    ax.update_datalim(points.get_datalim(ax.transData))\n                    if not fixed_scale and (scalex or scaley):\n                        ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n                    super(points.__class__, points).draw(renderer)\n\n                points.draw = draw.__get__(points)\n\n        _draw_figure(ax.figure)\n\n        # Finalize the axes details\n        if self.legend == \"auto\":\n            show_legend = not self._redundant_hue and self.input_format != \"wide\"\n        else:\n            show_legend = bool(self.legend)\n\n        if show_legend:\n            self.add_legend_data(ax)\n            handles, _ = ax.get_legend_handles_labels()\n            if handles:\n                ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 219, "name": "set_xlim", "kind": "ref", "category": "function", "info": "        ax.set_xlim(-.5, n - .5, auto=None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 223, "name": "set_ylim", "kind": "ref", "category": "function", "info": "        ax.set_ylim(n - .5, -.5, auto=None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 226, "name": "_native_width", "kind": "def", "category": "function", "info": "    def _native_width(self):\n        \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n        unique_values = np.unique(self.comp_data[self.cat_axis])\n        if len(unique_values) > 1:\n            native_width = np.nanmin(np.diff(unique_values))\n        else:\n            native_width = 1\n        return native_width\n\n    def _nested_offsets(self, width, dodge):\n        \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n        offsets = None\n        if \"hue\" in self.variables:\n            n_levels = len(self._hue_map.levels)\n            if dodge:\n                each_width = width / n_levels\n                offsets = np.linspace(0, width - each_width, n_levels)\n                offsets -= offsets.mean()\n            else:\n                offsets = np.zeros(n_levels)\n        return offsets\n\n    # Note that the plotting 
methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = 
ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. It may be better\n                    # to disable autoscaling there to avoid needing to do this.\n                    fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n                    ax.update_datalim(points.get_datalim(ax.transData))\n                    if not fixed_scale and (scalex or scaley):\n                        ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n                    super(points.__class__, points).draw(renderer)\n\n                points.draw = draw.__get__(points)\n\n        _draw_figure(ax.figure)\n\n        # Finalize the axes details\n        if self.legend == \"auto\":\n            show_legend = not self._redundant_hue and self.input_format != \"wide\"\n        else:\n            show_legend = bool(self.legend)\n\n        if show_legend:\n            self.add_legend_data(ax)\n            handles, _ = ax.get_legend_handles_labels()\n            if handles:\n                ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 235, "name": "_nested_offsets", "kind": "def", "category": "function", "info": "    def _nested_offsets(self, width, dodge):\n        \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n        offsets = None\n        if \"hue\" in self.variables:\n            n_levels = len(self._hue_map.levels)\n            if dodge:\n                each_width = width / n_levels\n                offsets = np.linspace(0, width - each_width, n_levels)\n                offsets -= offsets.mean()\n            else:\n                offsets = np.zeros(n_levels)\n        return offsets\n\n    # Note that the plotting methods here aim (in most cases) to produce the\n    # exact same artists as the original (pre 0.12) version of the code, so\n    # there is some weirdness that might not otherwise be clean or make sense in\n    # this context, such as adding empty artists for combinations of variables\n    # with no observations\n\n    def plot_strips(\n        self,\n        jitter,\n        dodge,\n        color,\n        edgecolor,\n        plot_kws,\n    ):\n\n        width = .8 * self._native_width\n        offsets = self._nested_offsets(width, dodge)\n\n        if jitter is True:\n            jlim = 0.1\n        else:\n            jlim = float(jitter)\n        if \"hue\" in self.variables and dodge:\n            jlim /= len(self._hue_map.levels)\n        jlim *= self._native_width\n        jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n        iter_vars = [self.cat_axis]\n        if dodge:\n            iter_vars.append(\"hue\")\n\n        ax = self.ax\n        dodge_move = jitter_move = 0\n\n        for sub_vars, sub_data in self.iter_data(iter_vars,\n                                                 from_comp_data=True,\n                                                 allow_empty=True):\n            if offsets is not None and (offsets != 0).any():\n                dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n            jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n            adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n            sub_data.loc[:, self.cat_axis] = adjusted_data\n\n            for var in \"xy\":\n                if self._log_scaled(var):\n                    sub_data[var] = np.power(10, sub_data[var])\n\n            ax = self._get_axes(sub_vars)\n            points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n            if \"hue\" in self.variables:\n                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n        # Finalize the axes details\n        if self.legend == \"auto\":\n            show_legend = not self._redundant_hue and self.input_format != \"wide\"\n        else:\n            show_legend = bool(self.legend)\n\n        if 
show_legend:\n            self.add_legend_data(ax)\n            handles, _ = ax.get_legend_handles_labels()\n            if handles:\n                ax.legend(title=self.legend_title)\n\n    def plot_swarms(\n        self,\n        dodge,\n        color,\n        edgecolor,\n        warn_thresh,\n        plot_kws,\n    ):\n\n        width = .8 * self._native_width\n        offsets = self._nested_offsets(width, dodge)\n\n        iter_vars = [self.cat_axis]\n        if dodge:\n            iter_vars.append(\"hue\")\n\n        ax = self.ax\n        point_collections = {}\n        dodge_move = 0\n\n        for sub_vars, sub_data in self.iter_data(iter_vars,\n                                                 from_comp_data=True,\n                                                 allow_empty=True):\n\n            if offsets is not None:\n                dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n            if not sub_data.empty:\n                sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n            for var in \"xy\":\n                if self._log_scaled(var):\n                    sub_data[var] = np.power(10, sub_data[var])\n\n            ax = self._get_axes(sub_vars)\n            points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n            if \"hue\" in self.variables:\n                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n            if not sub_data.empty:\n                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n        beeswarm = Beeswarm(\n            width=width, orient=self.orient, warn_thresh=warn_thresh,\n        )\n        for (ax, center), points in point_collections.items():\n            if points.get_offsets().shape[0] > 1:\n\n                def draw(points, renderer, *, center=center):\n\n                    beeswarm(points, center)\n\n                    if self.orient == \"h\":\n                        scalex = False\n                        scaley = ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 254, "name": "plot_strips", "kind": "def", "category": "function", "info": " def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data.loc[:, self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n 
points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n            if not sub_data.empty:\n                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n        beeswarm = Beeswarm(\n            width=width, orient=self.orient, warn_thresh=warn_thresh,\n        )\n        for (ax, center), points in point_collections.items():\n            if points.get_offsets().shape[0] > 1:\n\n                def draw(points, renderer, *, center=center):\n\n                    beeswarm(points, center)\n\n                    if self.orient == \"h\":\n                        scalex = False\n                        scaley = ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. It may be better\n                    # to disable autoscaling there to avoid needing to do this.\n                    fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n                    ax.update_datalim(points.get_datalim(ax.transData))\n                    if not fixed_scale and (scalex or scaley):\n                        ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n                    super(points.__class__, points).draw(renderer)\n\n                points.draw = draw.__get__(points)\n\n        _draw_figure(ax.figure)\n\n        # Finalize the axes details\n        if self.legend == \"auto\":\n            show_legend = not self._redundant_hue and self.input_format != \"wide\"\n        else:\n            show_legend = bool(self.legend)\n\n        if show_legend:\n            self.add_legend_data(ax)\n            handles, _ = ax.get_legend_handles_labels()\n            if handles:\n                ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 264, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": "        offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 282, "name": "iter_data", "kind": "ref", "category": "function", "info": "        for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 288, "name": "jitterer", "kind": "ref", "category": "function", "info": "            jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 294, "name": "_log_scaled", "kind": "ref", "category": "function", "info": "                if self._log_scaled(var):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 297, "name": "_get_axes", "kind": "ref", "category": "function", "info": "            ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 301, "name": "_hue_map", "kind": "ref", "category": "function", "info": "                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 304, "name": "_get_gray", "kind": "ref", "category": "function", "info": "                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 315, "name": "add_legend_data", "kind": "ref", "category": "function", "info": "            self.add_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 316, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": "            handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 320, "name": "plot_swarms", "kind": "def", "category": "function", "info": "    def plot_swarms(\n        self,\n        dodge,\n        color,\n        edgecolor,\n        warn_thresh,\n        plot_kws,\n    ):\n\n        width = .8 * self._native_width\n        offsets = self._nested_offsets(width, dodge)\n\n        iter_vars = [self.cat_axis]\n        if dodge:\n            iter_vars.append(\"hue\")\n\n        ax = self.ax\n        point_collections = {}\n        dodge_move = 0\n\n        for sub_vars, sub_data in self.iter_data(iter_vars,\n                                                 from_comp_data=True,\n                                                 allow_empty=True):\n\n            if offsets is not None:\n                dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n            if not sub_data.empty:\n                sub_data.loc[:, self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n            for var in \"xy\":\n                if self._log_scaled(var):\n                    sub_data[var] = np.power(10, sub_data[var])\n\n            ax = self._get_axes(sub_vars)\n            points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n            if \"hue\" in self.variables:\n                points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n            if edgecolor == \"gray\":  # XXX TODO change to \"auto\"\n                points.set_edgecolors(self._get_gray(points.get_facecolors()))\n            else:\n                points.set_edgecolors(edgecolor)\n\n            if not sub_data.empty:\n                point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n        beeswarm = Beeswarm(\n            width=width, orient=self.orient, warn_thresh=warn_thresh,\n        )\n        for (ax, center), points in point_collections.items():\n            if points.get_offsets().shape[0] > 1:\n\n                def draw(points, renderer, *, center=center):\n\n                    beeswarm(points, center)\n\n                    if self.orient == \"h\":\n                        scalex = False\n                        scaley = ax.get_autoscaley_on()\n                    else:\n                        scalex = ax.get_autoscalex_on()\n                        scaley = False\n\n                    # This prevents us from undoing the nice categorical axis limits\n                    # set in _adjust_cat_axis, because that method currently leaves\n                    # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 330, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 340, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 351, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 354, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 358, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 361, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 368, "name": "Beeswarm", "kind": "ref", "category": "function", "info": " beeswarm = Beeswarm(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 376, "name": "beeswarm", "kind": "ref", "category": "function", "info": " beeswarm(points, center)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 380, "name": "get_autoscaley_on", "kind": "ref", "category": "function", "info": " scaley = ax.get_autoscaley_on()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 382, "name": "get_autoscalex_on", "kind": "ref", "category": "function", "info": " scalex = ax.get_autoscalex_on()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 390, "name": "update_datalim", "kind": "ref", "category": "function", "info": " 
ax.update_datalim(points.get_datalim(ax.transData))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 392, "name": "autoscale_view", "kind": "ref", "category": "function", "info": "                        ax.autoscale_view(scalex=scalex, scaley=scaley)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 398, "name": "_draw_figure", "kind": "ref", "category": "function", "info": "        _draw_figure(ax.figure)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 407, "name": "add_legend_data", "kind": "ref", "category": "function", "info": "            self.add_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 408, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": "            handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 413, "name": "_CategoricalFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 418, "name": "_CategoricalPlotter", "kind": "def", "category": "class", "info": "establish_variables\t_group_longform\testablish_colors\thue_offsets\tnested_width\tannotate_axes\tadd_legend_data"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 424, "name": "establish_variables", "kind": "def", "category": "function", "info": "    def establish_variables(self, x=None, y=None, hue=None, data=None,\n                            orient=None, order=None, hue_order=None,\n                            units=None):\n        \"\"\"Convert input specification into a common representation.\"\"\"\n        # Option 1:\n        # We are plotting a wide-form dataset\n        # -----------------------------------\n        if x is None and y is None:\n\n            # Do a sanity check on the inputs\n            if hue is not None:\n                error = \"Cannot use `hue` without `x` and `y`\"\n                raise ValueError(error)\n\n            # No hue grouping with wide inputs\n            plot_hues = None\n            hue_title = None\n            hue_names = None\n\n            # No statistical units with wide inputs\n            plot_units = None\n\n            # We also won't get axes labels here\n            value_label = None\n            group_label = None\n\n            # Option 1a:\n            # The input data is a Pandas DataFrame\n            # ------------------------------------\n\n            if isinstance(data, pd.DataFrame):\n\n                # Order the data correctly\n                if order is None:\n                    order = []\n                    # Reduce to just numeric columns\n                    for col in data:\n                        if variable_type(data[col]) == \"numeric\":\n                            order.append(col)\n                plot_data = data[order]\n                group_names = order\n                group_label = data.columns.name\n\n                # Convert to a list of arrays, the common representation\n                iter_data = plot_data.iteritems()\n                plot_data = [np.asarray(s, float) for k, s in iter_data]\n\n            # Option 1b:\n            # The input data is an array or list\n            # ----------------------------------\n\n            else:\n\n                # We can't reorder the data\n                if order is not None:\n                    error = \"Input data must be a pandas object to reorder\"\n                    raise ValueError(error)\n\n                # The input data is an array\n                if hasattr(data, \"shape\"):\n                    if len(data.shape) == 1:\n                        if np.isscalar(data[0]):\n                            plot_data = [data]\n                        else:\n                            
plot_data = list(data)\n elif len(data.shape) == 2:\n nr, nc = data.shape\n if nr == 1 or nc == 1:\n plot_data = [data.ravel()]\n else:\n plot_data = [data[:, i] for i in range(nc)]\n else:\n error = (\"Input `data` can have no \"\n \"more than 2 dimensions\")\n raise ValueError(error)\n\n # Check if `data` is None to let us bail out here (for testing)\n elif data is None:\n plot_data = [[]]\n\n # The input data is a flat list\n elif np.isscalar(data[0]):\n plot_data = [data]\n\n # The input data is a nested list\n # This will catch some things that might fail later\n # but exhaustive checks are hard\n else:\n plot_data = data\n\n # Convert to a list of arrays, the common representation\n plot_data = [np.asarray(d, float) for d in plot_data]\n\n # The group names will just be numeric indices\n group_names = list(range(len(plot_data)))\n\n # Figure out the plotting orientation\n orient = \"h\" if str(orient).startswith(\"h\") else \"v\"\n\n # Option 2:\n # We are plotting a long-form dataset\n # -----------------------------------\n\n else:\n\n # See if we need to get variables from `data`\n if data is not None:\n x = data.get(x, x)\n y = data.get(y, y)\n hue = data.get(hue, hue)\n units = data.get(units, units)\n\n # Validate the inputs\n for var in [x, y, hue, units]:\n if isinstance(var, str):\n err = f\"Could not interpret input '{var}'\"\n raise ValueError(err)\n\n # Figure out the plotting orientation\n orient = infer_orient(\n x, y, orient, require_numeric=self.require_numeric\n )\n\n # Option 2a:\n # We are plotting a single set of data\n # ------------------------------------\n if x is None or y is None:\n\n # Determine where the data are\n vals = y if x is None else x\n\n # Put them into the common representation\n plot_data = [np.asarray(vals)]\n\n # Get a label for the value axis\n if hasattr(vals, \"name\"):\n value_label = vals.name\n else:\n value_label = None\n\n # This plot will not have group labels or hue nesting\n groups = None\n group_label = None\n group_names = []\n plot_hues = None\n hue_names = None\n hue_title = None\n plot_units = None\n\n # Option 2b:\n # We are grouping the data values by another variable\n # ---------------------------------------------------\n else:\n\n # Determine which role each variable will play\n if orient == \"v\":\n vals, groups = y, x\n else:\n vals, groups = x, y\n\n # Get the categorical axis label\n group_label = None\n if hasattr(groups, \"name\"):\n group_label = groups.name\n\n # Get the order on the categorical axis\n group_names = categorical_order(groups, order)\n\n # Group the numeric data\n plot_data, value_label = self._group_longform(vals, groups,\n group_names)\n\n # Now handle the hue levels for nested ordering\n if hue is None:\n plot_hues = None\n hue_title = None\n hue_names = None\n else:\n\n # Get the order of the hue levels\n hue_names = categorical_order(hue, hue_order)\n\n # Group the hue data\n plot_hues, hue_title = self._group_longform(hue, groups,\n group_names)\n\n # Now handle the units for nested observations\n if units is None:\n plot_units = None\n else:\n plot_units, _ = self._group_longform(units, groups,\n group_names)\n\n # Assign object attributes\n # ------------------------\n self.orient = orient\n self.plot_data = plot_data\n self.group_label = group_label\n self.value_label = value_label\n self.group_names = group_names\n self.plot_hues = plot_hues\n self.hue_title = hue_title\n self.hue_names = hue_names\n self.plot_units = plot_units\n\n def _group_longform(self, vals, grouper, order):\n 
\"\"\"Group a long-form variable by another with correct order.\"\"\"\n # Ensure that the groupby will work\n if not isinstance(vals, pd.Series):\n if isinstance(grouper, pd.Series):\n index = grouper.index\n else:\n index = None\n vals = pd.Series(vals, index=index)\n\n # Group the val data\n grouped_vals = vals.groupby(grouper)\n out_data = []\n for g in order:\n try:\n g_vals = grouped_vals.get_group(g)\n except KeyError:\n g_vals = np.array([])\n out_data.append(g_vals)\n\n # Get the vals axis label\n label = vals.name\n\n return out_data, label\n\n def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n 
ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 461, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 468, "name": "iteritems", "kind": "ref", "category": "function", "info": " iter_data = plot_data.iteritems()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 543, "name": "infer_orient", "kind": "ref", "category": "function", "info": " orient = infer_orient(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 590, "name": "categorical_order", "kind": "ref", "category": "function", "info": " group_names = categorical_order(groups, order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 593, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_data, value_label = self._group_longform(vals, groups,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 604, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(hue, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 607, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_hues, hue_title = self._group_longform(hue, groups,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 614, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_units, _ = self._group_longform(units, groups,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 629, "name": "_group_longform", "kind": "def", "category": "function", "info": " def _group_longform(self, vals, grouper, order):\n \"\"\"Group a long-form variable by another with correct order.\"\"\"\n # Ensure that the groupby will work\n if not isinstance(vals, pd.Series):\n if isinstance(grouper, pd.Series):\n index = grouper.index\n else:\n index = None\n vals = pd.Series(vals, index=index)\n\n # Group the val data\n grouped_vals = vals.groupby(grouper)\n out_data = []\n for g in order:\n try:\n g_vals = 
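`annotate_axes` places the groups at integer positions 0..n-1 and pads the categorical axis limits by half a unit so the outermost artists are not clipped. A minimal sketch of just that tick/limit bookkeeping for the vertical orientation:

```python
import numpy as np
import matplotlib.pyplot as plt

group_names = ["a", "b", "c"]
fig, ax = plt.subplots()
# Groups sit at integer positions along the categorical axis.
ax.set_xticks(np.arange(len(group_names)))
ax.set_xticklabels(group_names)
ax.xaxis.grid(False)
# Pad by half a unit so the first and last elements are not clipped.
ax.set_xlim(-.5, len(group_names) - .5, auto=None)
```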
grouped_vals.get_group(g)\n except KeyError:\n g_vals = np.array([])\n out_data.append(g_vals)\n\n # Get the vals axis label\n label = vals.name\n\n return out_data, label\n\n def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n 
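The "gray" used to frame boxes, whiskers, and violin edges is not a constant: `establish_colors` derives it from the palette by taking the minimum HLS lightness across the chosen colors and scaling it by 0.6, so edges stay darker than any fill. A sketch with an arbitrary two-color stand-in palette:

```python
from colorsys import rgb_to_hls
import matplotlib as mpl

rgb_colors = [(0.86, 0.37, 0.34), (0.33, 0.66, 0.41)]  # example palette
# rgb_to_hls returns (hue, lightness, saturation); index 1 is lightness.
lum = min(rgb_to_hls(*c)[1] for c in rgb_colors) * .6
gray = mpl.colors.rgb2hex((lum, lum, lum))
print(gray)  # a dark gray; exact value depends on the palette
```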
ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 640, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped_vals = vals.groupby(grouper)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 644, "name": "get_group", "kind": "ref", "category": "function", "info": " g_vals = grouped_vals.get_group(g)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 654, "name": "establish_colors", "kind": "def", "category": "function", "info": " def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def 
annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 665, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 667, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 669, "name": "husl_palette", "kind": "ref", "category": "function", "info": " colors = husl_palette(n_colors, l=.7) # noqa\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 680, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 682, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 695, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 699, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(colors, desat=saturation)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 702, "name": "color_palette", "kind": "ref", "category": "function", "info": " rgb_colors = color_palette(colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 707, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " gray = mpl.colors.rgb2hex((lum, 
lum, lum))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 714, "name": "hue_offsets", "kind": "def", "category": "function", "info": " def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 727, "name": "nested_width", "kind": "def", "category": "function", "info": " def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n 
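`hue_offsets` computes where dodged hue levels sit relative to their group position: n evenly spaced centers spanning the group width, shifted to have zero mean so they straddle the tick. A standalone sketch (`hue_offsets` here is a free function, not the property):

```python
import numpy as np

def hue_offsets(width, n_levels, dodge=True):
    # Evenly spaced centers for dodged hue levels, centered on the group.
    if not dodge:
        return np.zeros(n_levels)
    each = width / n_levels
    offsets = np.linspace(0, width - each, n_levels)
    return offsets - offsets.mean()

print(hue_offsets(0.8, 3))   # [-0.26666667  0.          0.26666667]
```

`nested_width` then shrinks each element to `width / n_levels * .98`, leaving a small gap between dodged neighbors.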
edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 735, "name": "annotate_axes", "kind": "def", "category": "function", "info": " def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 743, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(xlabel)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 745, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(ylabel)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 752, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(len(self.plot_data)))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 753, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(group_names)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 755, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(np.arange(len(self.plot_data)))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 756, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(group_names)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 760, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 763, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(-.5, len(self.plot_data) - .5, 
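`add_legend_data` relies on a standard matplotlib trick: a zero-size `Rectangle` proxy artist carries the color and label, so `ax.legend()` can show one swatch per hue level even though the real boxes are anonymous artists. A minimal reproduction; the `.15` edge gray and `C0` fill are arbitrary stand-ins.

```python
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
# A degenerate (zero width/height) patch: invisible on the axes, but
# its facecolor and label still appear in the legend.
rect = plt.Rectangle((0, 0), 0, 0, linewidth=0.75,
                     edgecolor=".15", facecolor="C0", label="level a")
ax.add_patch(rect)
ax.legend(loc="best", title="hue")
```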
auto=None)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 768, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 775, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(rect)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 778, "name": "_BoxPlotter", "kind": "def", "category": "class", "info": "__init__\tdraw_boxplot\trestyle_boxplot\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 784, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 785, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 795, "name": "draw_boxplot", "kind": "def", "category": "function", "info": " def draw_boxplot(self, ax, kws):\n \"\"\"Use matplotlib to draw a boxplot on an Axes.\"\"\"\n vert = self.orient == \"v\"\n\n props = {}\n for obj in [\"box\", \"whisker\", \"cap\", \"median\", \"flier\"]:\n props[obj] = kws.pop(obj + \"props\", {})\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = np.asarray(remove_na(group_data))\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n artist_dict = ax.boxplot(box_data,\n vert=vert,\n patch_artist=True,\n positions=[i],\n widths=self.width,\n **kws)\n color = self.colors[i]\n self.restyle_boxplot(artist_dict, color, props)\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = np.asarray(remove_na(group_data[hue_mask]))\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n center = i + offsets[j]\n artist_dict = ax.boxplot(box_data,\n vert=vert,\n patch_artist=True,\n positions=[center],\n widths=self.nested_width,\n **kws)\n self.restyle_boxplot(artist_dict, self.colors[j], props)\n # Add legend data, but just for one set of boxes\n\n def restyle_boxplot(self, artist_dict, color, props):\n \"\"\"Take a drawn matplotlib boxplot and make it look nice.\"\"\"\n for box in 
artist_dict[\"boxes\"]:\n box.update(dict(facecolor=color,\n zorder=.9,\n edgecolor=self.gray,\n linewidth=self.linewidth))\n box.update(props[\"box\"])\n for whisk in artist_dict[\"whiskers\"]:\n whisk.update(dict(color=self.gray,\n linewidth=self.linewidth,\n linestyle=\"-\"))\n whisk.update(props[\"whisker\"])\n for cap in artist_dict[\"caps\"]:\n cap.update(dict(color=self.gray,\n linewidth=self.linewidth))\n cap.update(props[\"cap\"])\n for med in artist_dict[\"medians\"]:\n med.update(dict(color=self.gray,\n linewidth=self.linewidth))\n med.update(props[\"median\"])\n for fly in artist_dict[\"fliers\"]:\n fly.update(dict(markerfacecolor=self.gray,\n marker=\"d\",\n markeredgecolor=self.gray,\n markersize=self.fliersize))\n fly.update(props[\"flier\"])\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_boxplot(ax, boxplot_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 813, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = np.asarray(remove_na(group_data))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 826, "name": "restyle_boxplot", "kind": "ref", "category": "function", "info": " self.restyle_boxplot(artist_dict, color, props)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 834, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 841, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = np.asarray(remove_na(group_data[hue_mask]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 854, "name": "restyle_boxplot", "kind": "ref", "category": "function", "info": " self.restyle_boxplot(artist_dict, self.colors[j], props)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 857, "name": "restyle_boxplot", "kind": "def", "category": "function", "info": " def restyle_boxplot(self, artist_dict, color, props):\n \"\"\"Take a drawn matplotlib boxplot and make it look nice.\"\"\"\n for box in artist_dict[\"boxes\"]:\n box.update(dict(facecolor=color,\n zorder=.9,\n edgecolor=self.gray,\n linewidth=self.linewidth))\n box.update(props[\"box\"])\n for whisk in artist_dict[\"whiskers\"]:\n whisk.update(dict(color=self.gray,\n linewidth=self.linewidth,\n linestyle=\"-\"))\n whisk.update(props[\"whisker\"])\n for cap in artist_dict[\"caps\"]:\n cap.update(dict(color=self.gray,\n linewidth=self.linewidth))\n cap.update(props[\"cap\"])\n for med in artist_dict[\"medians\"]:\n med.update(dict(color=self.gray,\n linewidth=self.linewidth))\n med.update(props[\"median\"])\n for fly in artist_dict[\"fliers\"]:\n fly.update(dict(markerfacecolor=self.gray,\n marker=\"d\",\n markeredgecolor=self.gray,\n markersize=self.fliersize))\n fly.update(props[\"flier\"])\n\n def plot(self, ax, boxplot_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_boxplot(ax, boxplot_kws)\n 
self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 887, "name": "draw_boxplot", "kind": "ref", "category": "function", "info": " self.draw_boxplot(ax, boxplot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 888, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 890, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 893, "name": "_ViolinPlotter", "kind": "def", "category": "class", "info": "__init__\testimate_densities\tfit_kde\tkde_support\tscale_area\tscale_width\tscale_count\tdwidth\tdraw_violins\tdraw_single_observation\tdraw_box_lines\tdraw_quartiles\tdraw_points\tdraw_stick_lines\tdraw_to_density\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 900, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 901, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 902, "name": "estimate_densities", "kind": "ref", "category": "function", "info": " self.estimate_densities(bw, cut, scale, scale_hue, gridsize)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 926, "name": "estimate_densities", "kind": "def", "category": "function", "info": " def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):\n \"\"\"Find the support and density for all of the data.\"\"\"\n # Initialize data structures to keep track of plotting data\n if self.hue_names is None:\n support = []\n density = []\n counts = np.zeros(len(self.plot_data))\n max_density = np.zeros(len(self.plot_data))\n else:\n support = [[] for _ in self.plot_data]\n density = [[] for _ in self.plot_data]\n size = len(self.group_names), len(self.hue_names)\n counts = np.zeros(size)\n max_density = np.zeros(size)\n\n for i, group_data in enumerate(self.plot_data):\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n # Strip missing datapoints\n kde_data = remove_na(group_data)\n\n # Handle special case of no data at this level\n if kde_data.size == 0:\n support.append(np.array([]))\n density.append(np.array([1.]))\n counts[i] = 0\n max_density[i] = 0\n continue\n\n # Handle special case of a single unique datapoint\n elif np.unique(kde_data).size == 1:\n support.append(np.unique(kde_data))\n density.append(np.array([1.]))\n counts[i] = 1\n max_density[i] = 0\n continue\n\n # Fit the KDE 
and get the used bandwidth size\n kde, bw_used = self.fit_kde(kde_data, bw)\n\n # Determine the support grid and get the density over it\n support_i = self.kde_support(kde_data, bw_used, cut, gridsize)\n density_i = kde.evaluate(support_i)\n\n # Update the data structures with these results\n support.append(support_i)\n density.append(density_i)\n counts[i] = kde_data.size\n max_density[i] = density_i.max()\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n for j, hue_level in enumerate(self.hue_names):\n\n # Handle special case of no data at this category level\n if not group_data.size:\n support[i].append(np.array([]))\n density[i].append(np.array([1.]))\n counts[i, j] = 0\n max_density[i, j] = 0\n continue\n\n # Select out the observations for this hue level\n hue_mask = self.plot_hues[i] == hue_level\n\n # Strip missing datapoints\n kde_data = remove_na(group_data[hue_mask])\n\n # Handle special case of no data at this level\n if kde_data.size == 0:\n support[i].append(np.array([]))\n density[i].append(np.array([1.]))\n counts[i, j] = 0\n max_density[i, j] = 0\n continue\n\n # Handle special case of a single unique datapoint\n elif np.unique(kde_data).size == 1:\n support[i].append(np.unique(kde_data))\n density[i].append(np.array([1.]))\n counts[i, j] = 1\n max_density[i, j] = 0\n continue\n\n # Fit the KDE and get the used bandwidth size\n kde, bw_used = self.fit_kde(kde_data, bw)\n\n # Determine the support grid and get the density over it\n support_ij = self.kde_support(kde_data, bw_used,\n cut, gridsize)\n density_ij = kde.evaluate(support_ij)\n\n # Update the data structures with these results\n support[i].append(support_ij)\n density[i].append(density_ij)\n counts[i, j] = kde_data.size\n max_density[i, j] = density_ij.max()\n\n # Scale the height of the density curve.\n # For a violinplot the density is non-quantitative.\n # The objective here is to scale the curves relative to 1 so that\n # they can be multiplied by the width parameter during plotting.\n\n if scale == \"area\":\n self.scale_area(density, max_density, scale_hue)\n\n elif scale == \"width\":\n self.scale_width(density)\n\n elif scale == \"count\":\n self.scale_count(density, counts, scale_hue)\n\n else:\n raise ValueError(f\"scale method '{scale}' not recognized\")\n\n # Set object attributes that will be used while plotting\n self.support = support\n self.density = density\n\n def fit_kde(self, x, bw):\n \"\"\"Estimate a KDE for a vector of data with flexible bandwidth.\"\"\"\n kde = gaussian_kde(x, bw)\n\n # Extract the numeric bandwidth from the KDE object\n bw_used = kde.factor\n\n # At this point, bw will be a numeric scale factor.\n # To get the actual bandwidth of the kernel, we multiple by the\n # unbiased standard deviation of the data, which we will use\n # elsewhere to compute the range of the support.\n bw_used = bw_used * x.std(ddof=1)\n\n return kde, bw_used\n\n def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in 
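`fit_kde` and `kde_support` together define each violin's curve: `gaussian_kde.factor` is only a scale factor, so the effective bandwidth is `factor * std(ddof=1)`, and the support grid extends `cut` bandwidths beyond the data range. A combined sketch; the single `fit_kde` function here merges both methods for brevity.

```python
import numpy as np
from scipy.stats import gaussian_kde

def fit_kde(x, bw="scott", cut=2, gridsize=100):
    kde = gaussian_kde(x, bw)
    # .factor is a scale factor; multiply by the unbiased std to get
    # the actual kernel bandwidth in data units.
    bw_used = kde.factor * x.std(ddof=1)
    support = np.linspace(x.min() - bw_used * cut,
                          x.max() + bw_used * cut, gridsize)
    return support, kde.evaluate(support)

x = np.random.default_rng(0).normal(size=200)
support, density = fit_kde(x)
print(support.shape, round(density.max(), 3))
```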
density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are 
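The `scale_*` methods all normalize the density arrays in place, which is why the `/=` and `*=` forms matter: the lists built in `estimate_densities` hold references to the same arrays the plotting code reads later. A sketch of the two simpler modes for the un-nested case:

```python
import numpy as np

def scale_width(density):
    # Every curve peaks at 1: all violins get the full width,
    # regardless of sample size.
    for d in density:
        d /= d.max()          # in-place: the caller's arrays change

def scale_count(density, counts):
    # Peak heights proportional to each group's number of observations.
    for count, d in zip(counts, density):
        d /= d.max()
        d *= count / counts.max()

density = [np.array([.1, .4, .2]), np.array([.2, .8, .3])]
scale_count(density, counts=np.array([10, 40]))
print([round(d.max(), 2) for d in density])   # [0.25, 1.0]
```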
drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n 
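`draw_box_lines` recomputes the boxplot statistics by hand: quartiles via `np.percentile`, and Tukey whiskers clipped to the most extreme observations within 1.5 IQR of the box. A sketch of just the statistics; the helper name `box_stats` is illustrative.

```python
import numpy as np

def box_stats(data):
    q25, q50, q75 = np.percentile(data, [25, 50, 75])
    lim = 1.5 * (q75 - q25)
    # Whiskers end at the most extreme points within 1.5 IQR of the box.
    h1 = np.min(data[data >= q25 - lim])
    h2 = np.max(data[data <= q75 + lim])
    return q25, q50, q75, h1, h2

print(box_stats(np.r_[np.arange(1, 10), 100.]))  # whisker excludes 100
```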
color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 949, "name": "remove_na", "kind": "ref", "category": "function", "info": " kde_data = remove_na(group_data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 968, "name": "fit_kde", "kind": "ref", "category": "function", "info": " kde, bw_used = self.fit_kde(kde_data, bw)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 971, "name": "kde_support", "kind": "ref", "category": "function", "info": " support_i = self.kde_support(kde_data, bw_used, cut, gridsize)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 998, "name": "remove_na", "kind": "ref", "category": "function", "info": " kde_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": 
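`draw_quartiles` and `draw_stick_lines` both delegate to `draw_to_density`, which looks up the density at the support point nearest the value and draws a line of that width, optionally only on one side of a split violin. A sketch of the vertical-orientation case, with `dwidth` passed explicitly instead of read from the instance:

```python
import numpy as np

def draw_to_density(ax, center, val, support, density, dwidth,
                    split=False, **kws):
    # Width of the line follows the density at the nearest grid point.
    idx = np.argmin(np.abs(support - val))
    width = dwidth * density[idx] * .99
    if split == "left":
        ax.plot([center - width, center], [val, val], **kws)
    elif split == "right":
        ax.plot([center, center + width], [val, val], **kws)
    else:
        ax.plot([center - width, center + width], [val, val], **kws)
```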
"seaborn/categorical.py", "line": 1017, "name": "fit_kde", "kind": "ref", "category": "function", "info": " kde, bw_used = self.fit_kde(kde_data, bw)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1020, "name": "kde_support", "kind": "ref", "category": "function", "info": " support_ij = self.kde_support(kde_data, bw_used,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1036, "name": "scale_area", "kind": "ref", "category": "function", "info": " self.scale_area(density, max_density, scale_hue)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1039, "name": "scale_width", "kind": "ref", "category": "function", "info": " self.scale_width(density)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1042, "name": "scale_count", "kind": "ref", "category": "function", "info": " self.scale_count(density, counts, scale_hue)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1051, "name": "fit_kde", "kind": "def", "category": "function", "info": " def fit_kde(self, x, bw):\n \"\"\"Estimate a KDE for a vector of data with flexible bandwidth.\"\"\"\n kde = gaussian_kde(x, bw)\n\n # Extract the numeric bandwidth from the KDE object\n bw_used = kde.factor\n\n # At this point, bw will be a numeric scale factor.\n # To get the actual bandwidth of the kernel, we multiple by the\n # unbiased standard deviation of the data, which we will use\n # elsewhere to compute the range of the support.\n bw_used = bw_used * x.std(ddof=1)\n\n return kde, bw_used\n\n def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not 
self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n 
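`dwidth` is the violin's half-width in group coordinates; the three branches above collapse to a single condition: only dodged, un-split hue levels shrink it. A sketch as a free function:

```python
def dwidth(width, hue_names=None, dodge=True, split=False):
    # Full half-width unless violins are dodged into separate,
    # un-split hue levels, which must share the group's width.
    if hue_names is None or not dodge or split:
        return width / 2
    return width / (2 * len(hue_names))

print(dwidth(0.8), dwidth(0.8, hue_names=["x", "y"]))  # 0.4 0.2
```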
violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n 
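In the split-violin branch of `draw_violins`, the two hue levels share one centerline at the group position: level 0 fills from `grid - density * dwidth` up to `grid`, and level 1 from `grid` outward. A self-contained sketch with a Gaussian stand-in for the KDE curve:

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
i, gridsize, dwidth = 0, 100, 0.4
support = np.linspace(-3, 3, gridsize)
density = np.exp(-support ** 2 / 2)          # stand-in for a KDE curve
grid = np.ones(gridsize) * i                 # shared centerline at position i
for j, color in enumerate(["C0", "C1"]):
    if j:
        # Second hue level: fill the right half.
        ax.fill_betweenx(support, grid, grid + density * dwidth,
                         facecolor=color)
    else:
        # First hue level: fill the left half.
        ax.fill_betweenx(support, grid - density * dwidth, grid,
                         facecolor=color)
```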
if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1066, "name": "kde_support", "kind": "def", "category": "function", "info": " def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # 
Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = 
np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def 
draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1072, "name": "scale_area", "kind": "def", "category": "function", "info": " def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n 
facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw 
stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == 
\"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1094, "name": "scale_width", "kind": "def", "category": "function", "info": " def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size 
== 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n 
color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1104, "name": "scale_count", "kind": "def", "category": "function", "info": " def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n 
return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker 
information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n 
else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1128, "name": "dwidth", "kind": "def", "category": "function", "info": " def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just 
for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + 
whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1137, "name": "draw_violins", "kind": "def", "category": "function", "info": " def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if 
support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask 
= self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if 
self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1157, "name": "item", "kind": "ref", "category": "function", "info": " val = support.item()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1158, "name": "item", "kind": "ref", "category": "function", "info": " d = density.item()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1159, "name": "draw_single_observation", "kind": "ref", "category": "function", "info": " self.draw_single_observation(ax, i, val, d)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1164, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1175, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1179, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1183, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data, support, density, i)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1187, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data, support, density, i)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1191, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1205, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1213, "name": "item", "kind": "ref", "category": "function", "info": " val = support.item()\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1214, "name": "item", "kind": "ref", "category": "function", "info": " d = density.item()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1218, "name": "draw_single_observation", "kind": "ref", "category": "function", "info": " self.draw_single_observation(ax, at_group, val, d)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1228, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1233, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1244, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1248, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1254, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1264, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1268, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1272, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1279, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1290, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1294, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i + offsets[j])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1298, "name": "draw_quartiles", "kind": "ref", "category": "function", 
"info": " self.draw_quartiles(ax, violin_data,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1304, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1310, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i + offsets[j])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1312, "name": "draw_single_observation", "kind": "def", "category": "function", "info": " def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw 
a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1326, "name": "draw_box_lines", "kind": "def", "category": "function", "info": " def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient 
== \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1360, "name": "draw_quartiles", "kind": "def", "category": "function", "info": " def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1364, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, q25, support, density, split,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1367, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, 
q50, support, density, split,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1370, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, q75, support, density, split,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1374, "name": "draw_points", "kind": "def", "category": "function", "info": " def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1387, "name": "draw_stick_lines", "kind": "def", "category": "function", "info": " def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1391, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, val, support, density, split,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1394, "name": "draw_to_density", "kind": "def", "category": "function", "info": " def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1418, "name": "draw_violins", "kind": "ref", "category": "function", "info": " self.draw_violins(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1419, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1421, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1424, "name": "_CategoricalStatPlotter", "kind": "def", "category": "class", "info": "nested_width\testimate_statistic\tdraw_confints"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1429, "name": "nested_width", "kind": "def", "category": "function", "info": " def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n 
ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1437, "name": "estimate_statistic", "kind": "def", "category": "function", "info": " def estimate_statistic(self, estimator, errorbar, n_boot, seed):\n\n if self.hue_names is None:\n statistic = []\n confint = []\n else:\n statistic = [[] for _ in self.plot_data]\n confint = [[] for _ in self.plot_data]\n\n var = {\"v\": \"y\", \"h\": \"x\"}[self.orient]\n\n agg = EstimateAggregator(estimator, errorbar, n_boot=n_boot, seed=seed)\n\n for i, group_data in enumerate(self.plot_data):\n\n # Option 1: we have a single layer of grouping\n # --------------------------------------------\n if self.plot_hues is None:\n\n df = pd.DataFrame({var: group_data})\n if self.plot_units is not None:\n df[\"units\"] = self.plot_units[i]\n\n res = agg(df, var)\n\n statistic.append(res[var])\n if errorbar is not None:\n confint.append((res[f\"{var}min\"], res[f\"{var}max\"]))\n\n # Option 2: we are grouping by a hue layer\n # ----------------------------------------\n\n else:\n for hue_level in self.hue_names:\n\n if not self.plot_hues[i].size:\n statistic[i].append(np.nan)\n if errorbar is not None:\n confint[i].append((np.nan, np.nan))\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n df = pd.DataFrame({var: group_data[hue_mask]})\n if self.plot_units is not None:\n df[\"units\"] = self.plot_units[i][hue_mask]\n\n res = agg(df, var)\n\n statistic[i].append(res[var])\n if errorbar is not None:\n confint[i].append((res[f\"{var}min\"], res[f\"{var}max\"]))\n\n # Save the resulting values for plotting\n self.statistic = np.array(statistic)\n self.confint = np.array(confint)\n\n def draw_confints(self, ax, at_group, confint, colors,\n errwidth=None, capsize=None, **kws):\n\n if errwidth is not None:\n kws.setdefault(\"lw\", errwidth)\n else:\n kws.setdefault(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n\n for at, (ci_low, ci_high), color in zip(at_group,\n confint,\n colors):\n if self.orient == \"v\":\n ax.plot([at, at], [ci_low, ci_high], color=color, **kws)\n if capsize is not None:\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_low, ci_low], color=color, **kws)\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_high, ci_high], color=color, **kws)\n else:\n ax.plot([ci_low, ci_high], [at, at], color=color, **kws)\n if capsize is not None:\n ax.plot([ci_low, ci_low],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n ax.plot([ci_high, ci_high],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1448, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " agg = EstimateAggregator(estimator, errorbar, n_boot=n_boot, seed=seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 1460, "name": "agg", "kind": "ref", "category": "function", "info": " res = agg(df, var)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1483, "name": "agg", "kind": "ref", "category": "function", "info": " res = agg(df, var)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1493, "name": "draw_confints", "kind": "def", "category": "function", "info": " def draw_confints(self, ax, at_group, confint, colors,\n errwidth=None, capsize=None, **kws):\n\n if errwidth is not None:\n kws.setdefault(\"lw\", errwidth)\n else:\n kws.setdefault(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n\n for at, (ci_low, ci_high), color in zip(at_group,\n confint,\n colors):\n if self.orient == \"v\":\n ax.plot([at, at], [ci_low, ci_high], color=color, **kws)\n if capsize is not None:\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_low, ci_low], color=color, **kws)\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_high, ci_high], color=color, **kws)\n else:\n ax.plot([ci_low, ci_high], [at, at], color=color, **kws)\n if capsize is not None:\n ax.plot([ci_low, ci_low],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n ax.plot([ci_high, ci_high],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1522, "name": "_BarPlotter", "kind": "def", "category": "class", "info": "__init__\tdraw_bars\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1529, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1531, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1532, "name": "estimate_statistic", "kind": "ref", "category": "function", "info": " self.estimate_statistic(estimator, errorbar, n_boot, seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1541, "name": "draw_bars", "kind": "def", "category": "function", "info": " def draw_bars(self, ax, kws):\n \"\"\"Draw the bars onto `ax`.\"\"\"\n # Get the right matplotlib function depending on the orientation\n barfunc = ax.bar if self.orient == \"v\" else ax.barh\n barpos = np.arange(len(self.statistic))\n\n if self.plot_hues is None:\n\n # Draw the bars\n barfunc(barpos, self.statistic, self.width,\n color=self.colors, align=\"center\", **kws)\n\n # Draw the confidence intervals\n errcolors = [self.errcolor] * len(barpos)\n self.draw_confints(ax,\n barpos,\n self.confint,\n errcolors,\n self.errwidth,\n self.capsize)\n\n else:\n\n for j, hue_level in enumerate(self.hue_names):\n\n # Draw the bars\n offpos = barpos + self.hue_offsets[j]\n barfunc(offpos, self.statistic[:, j], self.nested_width,\n color=self.colors[j], align=\"center\",\n 
label=hue_level, **kws)\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = self.confint[:, j]\n errcolors = [self.errcolor] * len(offpos)\n self.draw_confints(ax,\n offpos,\n confint,\n errcolors,\n self.errwidth,\n self.capsize)\n\n def plot(self, ax, bar_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_bars(ax, bar_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1550, "name": "barfunc", "kind": "ref", "category": "function", "info": " barfunc(barpos, self.statistic, self.width,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1555, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1568, "name": "barfunc", "kind": "ref", "category": "function", "info": " barfunc(offpos, self.statistic[:, j], self.nested_width,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1576, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1585, "name": "draw_bars", "kind": "ref", "category": "function", "info": " self.draw_bars(ax, bar_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1586, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1588, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1591, "name": "_PointPlotter", "kind": "def", "category": "class", "info": "__init__\thue_offsets\tdraw_points\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1600, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1602, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1603, "name": "estimate_statistic", "kind": "ref", "category": "function", "info": " self.estimate_statistic(estimator, errorbar, n_boot, seed)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1607, "name": "color_palette", "kind": "ref", "category": "function", "info": " self.colors = [color_palette()[0]] * 
len(self.colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1635, "name": "hue_offsets", "kind": "def", "category": "function", "info": " def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1644, "name": "draw_points", "kind": "def", "category": "function", "info": " def draw_points(self, ax):\n \"\"\"Draw the main data components of the plot.\"\"\"\n # Get the center positions on the categorical axis\n pointpos = np.arange(len(self.statistic))\n\n # Get the size of the plot elements\n lw = mpl.rcParams[\"lines.linewidth\"] * 1.8 * self.scale\n mew = lw * .75\n markersize = np.pi * np.square(lw) * 2\n\n if self.plot_hues is None:\n\n # Draw lines joining each estimate point\n if self.join:\n color = self.colors[0]\n ls = self.linestyles[0]\n if self.orient == \"h\":\n ax.plot(self.statistic, pointpos,\n color=color, ls=ls, lw=lw)\n else:\n ax.plot(pointpos, self.statistic,\n color=color, ls=ls, lw=lw)\n\n # Draw the confidence intervals\n self.draw_confints(ax, pointpos, self.confint, self.colors,\n self.errwidth, self.capsize)\n\n # Draw the estimate points\n marker = self.markers[0]\n colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]\n if self.orient == \"h\":\n x, y = self.statistic, pointpos\n else:\n x, y = pointpos, self.statistic\n ax.scatter(x, y,\n linewidth=mew, marker=marker, s=markersize,\n facecolor=colors, edgecolor=colors)\n\n else:\n\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Determine the values to plot for this level\n statistic = self.statistic[:, j]\n\n # Determine the position on the categorical 
and z axes\n offpos = pointpos + offsets[j]\n z = j + 1\n\n # Draw lines joining each estimate point\n if self.join:\n color = self.colors[j]\n ls = self.linestyles[j]\n if self.orient == \"h\":\n ax.plot(statistic, offpos, color=color,\n zorder=z, ls=ls, lw=lw)\n else:\n ax.plot(offpos, statistic, color=color,\n zorder=z, ls=ls, lw=lw)\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = self.confint[:, j]\n errcolors = [self.colors[j]] * len(offpos)\n self.draw_confints(ax, offpos, confint, errcolors,\n self.errwidth, self.capsize,\n zorder=z)\n\n # Draw the estimate points\n n_points = len(remove_na(offpos))\n marker = self.markers[j]\n color = mpl.colors.colorConverter.to_rgb(self.colors[j])\n\n if self.orient == \"h\":\n x, y = statistic, offpos\n else:\n x, y = offpos, statistic\n\n if not len(remove_na(statistic)):\n x = y = [np.nan] * n_points\n\n ax.scatter(x, y, label=hue_level,\n facecolor=color, edgecolor=color,\n linewidth=mew, marker=marker, s=markersize,\n zorder=z)\n\n def plot(self, ax):\n \"\"\"Make the plot.\"\"\"\n self.draw_points(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1668, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax, pointpos, self.confint, self.colors,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1673, "name": "to_rgb", "kind": "ref", "category": "function", "info": " colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1709, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax, offpos, confint, errcolors,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1714, "name": "remove_na", "kind": "ref", "category": "function", "info": " n_points = len(remove_na(offpos))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1716, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color = mpl.colors.colorConverter.to_rgb(self.colors[j])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1723, "name": "remove_na", "kind": "ref", "category": "function", "info": " if not len(remove_na(statistic)):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1733, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1734, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1736, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1739, "name": "_CountPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1743, "name": "_LVPlotter", "kind": "def", "category": "class", "info": "__init__\t_lv_box_ends\t_lv_outliers\t_width_functions\t_lvplot\tdraw_letter_value_plot\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1783, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1784, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1786, "name": "_lv_box_ends", "kind": "def", "category": "function", "info": " def _lv_box_ends(self, vals):\n \"\"\"Get the number of data points and calculate `depth` of\n letter-value plot.\"\"\"\n vals = np.asarray(vals)\n # Remove infinite values while handling a 'object' dtype\n # that can come from pd.Float64Dtype() input\n with pd.option_context('mode.use_inf_as_null', True):\n vals = vals[~pd.isnull(vals)]\n n = len(vals)\n p = self.outlier_prop\n\n # Select the depth, i.e. number of boxes to draw, based on the method\n if self.k_depth == 'full':\n # extend boxes to 100% of the data\n k = int(np.log2(n)) + 1\n elif self.k_depth == 'tukey':\n # This results with 5-8 points in each tail\n k = int(np.log2(n)) - 3\n elif self.k_depth == 'proportion':\n k = int(np.log2(n)) - int(np.log2(n * p)) + 1\n elif self.k_depth == 'trustworthy':\n point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n k = int(np.log2(n / point_conf)) + 1\n else:\n k = int(self.k_depth) # allow having k as input\n # If the number happens to be less than 1, set k to 1\n if k < 1:\n k = 1\n\n # Calculate the upper end for each of the k boxes\n upper = [100 * (1 - 0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Calculate the lower end for each of the k boxes\n lower = [100 * (0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Stitch the box ends together\n percentile_ends = [(i, j) for i, j in zip(lower, upper)]\n box_ends = [np.percentile(vals, q) for q in percentile_ends]\n return box_ends, k\n\n def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1807, "name": "_normal_quantile_func", "kind": "ref", "category": "function", "info": " point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1824, "name": "_lv_outliers", "kind": "def", "category": "function", "info": " def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1833, "name": "_width_functions", "kind": "def", "category": "function", "info": " def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],
                widths=1, ax=None, box_kws=None,
                flier_kws=None,
                line_kws=None):

        # Default keyword dicts, based on distributions.plot_univariate_histogram
        box_kws = {} if box_kws is None else box_kws.copy()
        flier_kws = {} if flier_kws is None else flier_kws.copy()
        line_kws = {} if line_kws is None else line_kws.copy()

        # Set the default kwargs for the boxes
        box_default_kws = dict(edgecolor=self.gray,
                               linewidth=self.linewidth)
        for k, v in box_default_kws.items():
            box_kws.setdefault(k, v)

        # Set the default kwargs for the lines denoting medians
        line_default_kws = dict(
            color=".15", alpha=0.45, solid_capstyle="butt", linewidth=self.linewidth
        )
        for k, v in line_default_kws.items():
            line_kws.setdefault(k, v)

        # Set the default kwargs for the outlier scatterplot
        flier_default_kws = dict(marker='d', color=self.gray)
        for k, v in flier_default_kws.items():
            flier_kws.setdefault(k, v)

        vert = self.orient == "v"
        x = positions[0]
        box_data = np.asarray(box_data)

        # If we only have one data point, plot a line
        if len(box_data) == 1:
            line_kws.update({
                'color': box_kws['edgecolor'],
                'linestyle': box_kws.get('linestyle', '-'),
                'linewidth': max(box_kws["linewidth"], line_kws["linewidth"])
            })
            ys = [box_data[0], box_data[0]]
            xs = [x - widths / 2, x + widths / 2]
            if vert:
                xx, yy = xs, ys
            else:
                xx, yy = ys, xs
            ax.plot(xx, yy, **line_kws)
        else:
            # Get the number of data points and calculate "depth" of
            # letter-value plot
            box_ends, k = self._lv_box_ends(box_data)

            # Functions for calculating the width and height
            # of the letter value boxes
            width = self._width_functions(self.scale)

            # Function to find height of boxes
            def height(b):
                return b[1] - b[0]

            # Functions to construct the letter value boxes
            def vert_perc_box(x, b, i, k, w):
                rect = Patches.Rectangle((x - widths * w / 2, b[0]),
                                         widths * w,
                                         height(b), fill=True)
                return rect

            def horz_perc_box(x, b, i, k, w):
                rect = Patches.Rectangle((b[0], x - widths * w / 2),
                                         height(b), widths * w,
                                         fill=True)
                return rect

            # Scale the width of the boxes so the biggest starts at 1
            w_area = np.array([width(height(b), i, k)
                               for i, b in enumerate(box_ends)])
            w_area = w_area / np.max(w_area)

            # Calculate the median
            y = np.median(box_data)

            # Calculate the outliers and plot them (only if showfliers == True)
            outliers = []
            if self.showfliers:
                outliers = self._lv_outliers(box_data, k)
            hex_color = mpl.colors.rgb2hex(color)

            if vert:
                box_func = vert_perc_box
                xs_median = [x - widths / 2, x + widths / 2]
                ys_median = [y, y]
                xs_outliers = np.full(len(outliers), x)
                ys_outliers = outliers

            else:
                box_func = horz_perc_box
                xs_median = [y, y]
                ys_median = [x - widths / 2, x + widths / 2]
                xs_outliers = outliers
                ys_outliers = np.full(len(outliers), x)

            # Plot the medians
            ax.plot(
                xs_median,
                ys_median,
                **line_kws
            )

            # Plot outliers (if any)
            if len(outliers) > 0:
                ax.scatter(xs_outliers, ys_outliers, **flier_kws)

            # Construct a color map from the input color
            rgb = [hex_color, (1, 1, 1)]
            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)
            # Make sure that the last boxes contain hue and are not pure white
            rgb = [hex_color, cmap(.85)]
            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)

            # Use the constructed cmap unless the caller already supplied one
            box_kws.setdefault('cmap', cmap)

            boxes = [box_func(x, b[0], i, k, b[1])
                     for i, b in enumerate(zip(box_ends, w_area))]

            collection = PatchCollection(boxes, **box_kws)

            # Set the color gradation; the first box gets the value 1
            collection.set_array(np.array(np.linspace(1, 0, len(boxes))))

            # Plot the boxes
            ax.add_collection(collection)

    def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,
                               line_kws=None):
        """Use matplotlib to draw a letter value plot on an Axes."""

        for i, group_data in enumerate(self.plot_data):

            if self.plot_hues is None:

                # Handle case where there is no data at this level
                if group_data.size == 0:
                    continue

                # Draw a single box or a set of boxes
                # with a single level of grouping
                box_data = remove_na(group_data)

                # Handle case where there is no non-null data
                if box_data.size == 0:
                    continue

                color = self.colors[i]

                self._lvplot(box_data,
                             positions=[i],
                             color=color,
                             widths=self.width,
                             ax=ax,
                             box_kws=box_kws,
                             flier_kws=flier_kws,
                             line_kws=line_kws)

            else:
                # Draw nested groups of boxes
                offsets = self.hue_offsets
                for j, hue_level in enumerate(self.hue_names):

                    # Add a legend for this hue level
                    if not i:
                        self.add_legend_data(ax, self.colors[j], hue_level)

                    # Handle case where there is no data at this level
                    if group_data.size == 0:
                        continue

                    hue_mask = self.plot_hues[i] == hue_level
                    box_data = remove_na(group_data[hue_mask])

                    # Handle case where there is no non-null data
                    if box_data.size == 0:
                        continue

                    color = self.colors[j]
                    center = i + offsets[j]
                    self._lvplot(box_data,
                                 positions=[center],
                                 color=color,
                                 widths=self.nested_width,
                                 ax=ax,
                                 box_kws=box_kws,
                                 flier_kws=flier_kws,
                                 line_kws=line_kws)

        # Autoscale the values axis to make sure all patches are visible
        ax.autoscale_view(scalex=self.orient == "h", scaley=self.orient == "v")

    def plot(self, ax, box_kws, flier_kws, line_kws):
        """Make the plot."""
        self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)
        self.annotate_axes(ax)
        if self.orient == "h":
            ax.invert_yaxis()
"},
{"rel_fname": "seaborn/categorical.py", "line": 1840, "name": "_lvplot", "kind": "def", "category": "function", "info": "    def _lvplot(self, box_data, positions,\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1891, "name": "_lv_box_ends", "kind": "ref", "category": "function", "info": "            box_ends, k = self._lv_box_ends(box_data)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1895, "name": "_width_functions", "kind": "ref", "category": "function", "info": "            width = self._width_functions(self.scale)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1898, "name": "height", "kind": "def", "category": "function", "info": "            def height(b):\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1902, "name": "vert_perc_box", "kind": "def", "category": "function", "info": "            def vert_perc_box(x, b, i, k, w):\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1905, "name": "height", "kind": "ref", "category": "function", "info": "                                         height(b), fill=True)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1908, "name": "horz_perc_box", "kind": "def", "category": "function", "info": "            def horz_perc_box(x, b, i, k, w):\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1910, "name": "height", "kind": "ref", "category": "function", "info": "                                         height(b), widths * w,\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1915, "name": "width", "kind": "ref", "category": "function", "info": "            w_area = np.array([width(height(b), i, k)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1915, "name": "height", "kind": "ref", "category": "function", "info": "            w_area = np.array([width(height(b), i, k)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1925, "name": "_lv_outliers", "kind": "ref", "category": "function", "info": "                outliers = self._lv_outliers(box_data, k)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1926, "name": "rgb2hex", "kind": "ref", "category": "function", "info": "            hex_color = mpl.colors.rgb2hex(color)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1957, "name": "from_list", "kind": "ref", "category": "function", "info": "            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1959, "name": "cmap", "kind": "ref", "category": "function", "info": "            rgb = [hex_color, cmap(.85)]\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1960, "name": "from_list", "kind": "ref", "category": "function", "info": "            cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1965, "name": "box_func", "kind": "ref", "category": "function", "info": "            boxes = [box_func(x, b[0], i, k, b[1])\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1974, "name": "add_collection", "kind": "ref", "category": "function", "info": "            ax.add_collection(collection)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1976, "name": "draw_letter_value_plot", "kind": "def", "category": "function", "info": "    def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1990, "name": "remove_na", "kind": "ref", "category": "function", "info": "                box_data = remove_na(group_data)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 1998, "name": "_lvplot", "kind": "ref", "category": "function", "info": "                self._lvplot(box_data,\n"},
{"rel_fname": "seaborn/categorical.py", "line": 2014, "name": "add_legend_data", "kind": "ref", "category": "function", "info": "                        self.add_legend_data(ax, self.colors[j], hue_level)\n"},
{"rel_fname": "seaborn/categorical.py", "line": 2021, "name": "remove_na", "kind": "ref", "category": "function", "info": "                    box_data = remove_na(group_data[hue_mask])\n"},
{"rel_fname": "seaborn/categorical.py", "line": 2029, "name": "_lvplot", "kind": "ref", "category": "function", "info": "                    self._lvplot(box_data,\n"},
{"rel_fname": "seaborn/categorical.py", "line": 2039, "name": "autoscale_view", "kind": "ref", "category": "function", "info": "        ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n"},
{"rel_fname": "seaborn/categorical.py", "line": 2043, "name": "draw_letter_value_plot", "kind": "ref", "category": "function", "info": "        self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n"},
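The two-step colormap construction in _lvplot above is easy to miss: the first LinearSegmentedColormap runs from the base color to pure white, and the second rebuilds it with cmap(.85) as the light end so the narrowest boxes keep some hue. The following is a minimal standalone sketch of that mechanism in plain matplotlib (not seaborn API; the box widths and figure limits are invented for illustration):

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle

base = mpl.colors.rgb2hex((255 / 256, 185 / 256, 0))
# First pass: base color -> white
cmap = mpl.colors.LinearSegmentedColormap.from_list("new_map", [base, (1, 1, 1)])
# Second pass: clip the light end at cmap(.85) so no box is pure white
cmap = mpl.colors.LinearSegmentedColormap.from_list("new_map", [base, cmap(.85)])

# Nested rectangles standing in for letter-value boxes
boxes = [Rectangle((0.5 - w / 2, i), w, 1) for i, w in enumerate([1.0, 0.6, 0.35])]
collection = PatchCollection(boxes, cmap=cmap, edgecolor=".2")
# Values 1..0 are normalized and mapped through the colormap,
# giving a monotone gradation across the boxes
collection.set_array(np.linspace(1, 0, len(boxes)))

fig, ax = plt.subplots()
ax.add_collection(collection)
ax.set(xlim=(0, 1), ylim=(0, 3))
plt.show()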
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2044, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2046, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2229, "name": "_BoxPlotter", "kind": "ref", "category": "function", "info": " plotter = _BoxPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2303, "name": "_ViolinPlotter", "kind": "ref", "category": "function", "info": " plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2397, "name": "boxenplot", "kind": "def", "category": "function", "info": "def boxenplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75,\n width=.8, dodge=True, k_depth='tukey', linewidth=None,\n scale='exponential', outlier_prop=0.007, trust_alpha=0.05,\n showfliers=True,\n ax=None, box_kws=None, flier_kws=None, line_kws=None,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2405, "name": "_LVPlotter", "kind": "ref", "category": "function", "info": " plotter = _LVPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2493, "name": "stripplot", "kind": "def", "category": "function", "info": "def stripplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n jitter=True, dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0,\n hue_norm=None, native_scale=False, formatter=None, legend=\"auto\",\n ax=None, **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2501, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2503, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2514, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2516, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 2518, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2519, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2521, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2523, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2535, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2546, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2547, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2614, "name": "swarmplot", "kind": "def", "category": "function", "info": "def swarmplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0, hue_norm=None,\n native_scale=False, formatter=None, legend=\"auto\", warn_thresh=.05,\n ax=None, **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2622, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2624, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2635, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2637, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2642, "name": 
"_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2643, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2645, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2647, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2661, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2669, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2670, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2736, "name": "barplot", "kind": "def", "category": "function", "info": "def barplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, units=None, seed=None,\n orient=None, color=None, palette=None, saturation=.75, width=.8,\n errcolor=\".26\", errwidth=None, capsize=None, dodge=True, ci=\"deprecated\",\n ax=None,\n **kwargs,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2745, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2752, "name": "_BarPlotter", "kind": "ref", "category": "function", "info": " plotter = _BarPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2826, "name": "pointplot", "kind": "def", "category": "function", "info": "def pointplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, units=None, seed=None,\n markers=\"o\", linestyles=\"-\", dodge=False, join=True, scale=1,\n orient=None, color=None, palette=None, errwidth=None, ci=\"deprecated\",\n capsize=None, ax=None,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2834, 
"name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2836, "name": "_PointPlotter", "kind": "ref", "category": "function", "info": " plotter = _PointPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2914, "name": "countplot", "kind": "def", "category": "function", "info": "def countplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75, width=.8,\n dodge=True, ax=None, **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2938, "name": "_CountPlotter", "kind": "ref", "category": "function", "info": " plotter = _CountPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2998, "name": "catplot", "kind": "def", "category": "function", "info": "def catplot(\n data=None, *, x=None, y=None, hue=None, row=None, col=None,\n col_wrap=None, estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000,\n units=None, seed=None, order=None, hue_order=None, row_order=None,\n col_order=None, height=5, aspect=1, kind=\"strip\", native_scale=False,\n formatter=None, orient=None, color=None, palette=None, hue_norm=None,\n legend=\"auto\", legend_out=True, sharex=True, sharey=True,\n margin_titles=False, facet_kws=None, ci=\"deprecated\",\n **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3026, "name": "_CategoricalFacetPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalFacetPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3028, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalFacetPlotter.get_semantics(locals()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3043, "name": "rename", "kind": "ref", "category": "function", "info": " data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3044, "name": "duplicated", "kind": "ref", "category": "function", "info": " data = data.loc[:, ~data.columns.duplicated()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3052, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3067, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3069, "name": "_attach", "kind": 
"ref", "category": "function", "info": " p._attach(g)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3074, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3075, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3076, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3097, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3121, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3131, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3133, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3137, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3142, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " g._update_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3146, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3180, "name": "_CategoricalPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotter()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3182, "name": "establish_variables", "kind": "ref", "category": "function", "info": " p.establish_variables(x_, y_, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3206, "name": "establish_colors", "kind": "ref", "category": "function", "info": " p.establish_colors(color, palette, 1)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3229, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3236, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(**facet_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3239, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(plot_func, x=x, y=y, hue=hue, **plot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3242, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.value_label, p.group_label)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3244, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.group_label, p.value_label)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3249, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(x_var=\"count\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3251, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(y_var=\"count\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3255, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3347, "name": "Beeswarm", "kind": "def", "category": "class", "info": "__init__\t__call__\tbeeswarm\tcould_overlap\tposition_candidates\tfirst_non_overlapping_candidate\tadd_gutters"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3375, "name": "transform", "kind": "ref", "category": "function", "info": " orig_xy = ax.transData.transform(orig_xy_data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3382, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3385, "name": "item", "kind": "ref", "category": "function", "info": " edge = points.get_linewidth().item()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3395, "name": "beeswarm", "kind": "ref", "category": "function", "info": " new_xyr[sorter] = self.beeswarm(orig_xyr)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3402, "name": "inverted", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3402, "name": "transform", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3409, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_y_data, center, log_scale=log_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3411, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_x_data, center, log_scale=log_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3419, "name": "beeswarm", "kind": "def", "category": "function", "info": " def beeswarm(self, orig_xyr):\n \"\"\"Adjust x position of points to avoid overlaps.\"\"\"\n # In this method, `x` is always the categorical axis\n # Center of the swarm, in point coordinates\n midline = orig_xyr[0, 0]\n\n # Start the swarm with the first point\n swarm = np.atleast_2d(orig_xyr[0])\n\n # Loop over the remaining points\n for xyr_i in orig_xyr[1:]:\n\n # Find the points in the swarm that could possibly\n # overlap with the point we are currently placing\n neighbors = self.could_overlap(xyr_i, swarm)\n\n # Find positions that would be valid individually\n # with respect to each of the swarm neighbors\n candidates = self.position_candidates(xyr_i, neighbors)\n\n # Sort candidates by their centrality\n offsets = np.abs(candidates[:, 0] - midline)\n candidates = candidates[np.argsort(offsets)]\n\n # Find the first candidate that does not overlap any neighbors\n new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n\n # Place it into the swarm\n swarm = np.vstack([swarm, new_xyr_i])\n\n return swarm\n\n def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all 
candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3433, "name": "could_overlap", "kind": "ref", "category": "function", "info": " neighbors = self.could_overlap(xyr_i, swarm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3437, "name": "position_candidates", "kind": "ref", "category": "function", "info": " candidates = self.position_candidates(xyr_i, neighbors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3444, "name": "first_non_overlapping_candidate", "kind": "ref", "category": "function", "info": " new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3451, "name": "could_overlap", "kind": "def", "category": "function", "info": " def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n 
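The greedy placement above is easier to see in isolation. The following standalone sketch (not seaborn's class, a simplified re-implementation under the same geometry) places points one at a time: try the midline, then offsets that just clear each potential neighbor, taking the most central candidate that overlaps nothing. Seaborn raises RuntimeError when nothing fits and alternates the left/right candidate order; both refinements are omitted here.

import numpy as np

def place_swarm(y, r):
    """Greedy x-offsets for points with y-positions `y` and radii `r`."""
    swarm = [(0.0, y[0], r[0])]
    for y_i, r_i in zip(y[1:], r[1:]):
        # Neighbors close enough along y to overlap at all
        neighbors = [(x_j, y_j, r_j) for x_j, y_j, r_j in swarm
                     if abs(y_i - y_j) < r_i + r_j]
        # Candidate x-positions: midline plus just-clearing offsets,
        # padded by the same 5% margin seaborn uses
        candidates = [0.0]
        for x_j, y_j, r_j in neighbors:
            dy = y_i - y_j
            dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05
            candidates += [x_j - dx, x_j + dx]
        # Take the most central candidate that overlaps no neighbor
        for x_i in sorted(candidates, key=abs):
            if all((x_i - x_j) ** 2 + (y_i - y_j) ** 2 >= (r_i + r_j) ** 2
                   for x_j, y_j, r_j in neighbors):
                break
        swarm.append((x_i, y_i, r_i))
    return [x for x, _, _ in swarm]

print(place_swarm(y=[0.0, 0.1, 0.2, 0.25], r=[0.2] * 4))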
{"rel_fname": "seaborn/categorical.py", "line": 3465, "name": "position_candidates", "kind": "def", "category": "function", "info": "    def position_candidates(self, xyr_i, neighbors):\n"},
{"rel_fname": "seaborn/categorical.py", "line": 3482, "name": "first_non_overlapping_candidate", "kind": "def", "category": "function", "info": "    def first_non_overlapping_candidate(self, candidates, neighbors):\n"},
{"rel_fname": "seaborn/categorical.py", "line": 3515, "name": "add_gutters", "kind": "def", "category": "function", "info": "    def add_gutters(self, points, center, log_scale=False):\n"},
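A hedged numeric check of the add_gutters logic shown in the Beeswarm code above: on a log-scaled axis the gutters sit a half-width away in log10 space, not in data space, and points beyond them are clamped (the values below are invented for the demonstration):

import numpy as np

center, width = 10.0, 0.8
half_width = width / 2
low = 10 ** (np.log10(center) - half_width)   # ~3.98, not 10 - 0.4
high = 10 ** (np.log10(center) + half_width)  # ~25.12, not 10 + 0.4

points = np.array([2.0, 9.0, 30.0])
clamped = np.clip(points, low, high)           # same effect as the two-sided clamp
print(low, high, clamped)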
This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3515, "name": "add_gutters", "kind": "def", "category": "function", "info": " def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/cm.py", "rel_fname": "seaborn/cm.py", "line": 1582, "name": "register_colormap", "kind": "ref", "category": "function", "info": " register_colormap(_name, _cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/cm.py", "rel_fname": "seaborn/cm.py", "line": 1583, "name": "register_colormap", "kind": "ref", "category": "function", "info": " register_colormap(_name + \"_r\", _cmap_r)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 83, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 85, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 86, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " dist=DocstringComponents(_dist_params),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 87, "name": "from_function_params", "kind": "ref", "category": "function", "info": " 
kde=DocstringComponents.from_function_params(KDE.__init__),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 88, "name": "from_function_params", "kind": "ref", "category": "function", "info": " hist=DocstringComponents.from_function_params(Histogram.__init__),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 89, "name": "from_function_params", "kind": "ref", "category": "function", "info": " ecdf=DocstringComponents.from_function_params(ECDF.__init__),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 98, "name": "_DistributionPlotter", "kind": "def", "category": "class", "info": "__init__\tunivariate\tdata_variable\thas_xy_data\t_add_legend\t_artist_kws\t_quantile_to_level\t_cmap_from_color\t_default_discrete\t_resolve_multiple\t_compute_univariate_density\tplot_univariate_histogram\tplot_bivariate_histogram\tplot_univariate_density\tplot_bivariate_density\tplot_univariate_ecdf\tplot_rug\t_plot_single_rug"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 114, "name": "univariate", "kind": "def", "category": "function", "info": " def univariate(self):\n \"\"\"Return True if only x or y are used.\"\"\"\n # TODO this could go down to core, but putting it here now.\n # We'd want to be conceptually clear that univariate only applies\n # to x/y and not to other semantics, which can exist.\n # We haven't settled on a good conceptual name for x/y.\n return bool({\"x\", \"y\"} - set(self.variables))\n\n @property\n def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different 
ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in 
densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif 
element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, 
alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n 
mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 123, "name": "data_variable", "kind": "def", "category": "function", "info": " def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different 
ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in 
densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif 
element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, 
alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n 
mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n        )\n\n    def _plot_single_rug(self, sub_data, var, height, ax, kws):\n        \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n        vector = sub_data[var]\n        n = len(vector)\n\n        # Return data to linear domain\n        # This needs an automatic solution; see GH2409\n        if self._log_scaled(var):\n            vector = np.power(10, vector)\n\n        # We'll always add a single collection with varying colors\n        if \"hue\" in self.variables:\n            colors = self._hue_map(sub_data[\"hue\"])\n        else:\n            colors = None\n\n        # Build the array of values for the LineCollection\n        if var == \"x\":\n\n            trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n            xy_pairs = np.column_stack([\n                np.repeat(vector, 2), np.tile([0, height], n)\n            ])\n\n        if var == \"y\":\n\n            trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n            xy_pairs = np.column_stack([\n                np.tile([0, height], n), np.repeat(vector, 2)\n            ])\n\n        # Draw the lines on the plot\n        line_segs = xy_pairs.reshape([n, 2, 2])\n        ax.add_collection(LineCollection(\n            line_segs, transform=trans, colors=colors, **kws\n        ))\n\n        ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 131, "name": "has_xy_data", "kind": "def", "category": "function", "info": "    def has_xy_data(self):\n        \"\"\"Return True at least one of x or y is defined.\"\"\"\n        # TODO see above points about where this should go\n        return bool({\"x\", \"y\"} & set(self.variables))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 136, "name": "_add_legend", "kind": "def", "category": "function", "info": "    def _add_legend(\n        self,\n        ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n    ):\n        \"\"\"Add artists that reflect semantic mappings and put then in a legend.\"\"\"\n        # TODO note that this doesn't handle numeric mappings like the relational plots\n        handles = []\n        labels = []\n        for level in self._hue_map.levels:\n            color = self._hue_map(level)\n\n            kws = self._artist_kws(\n                artist_kws, fill, element, multiple, color, alpha\n            )\n\n            # color gets added to the kws to workaround an issue with barplot's color\n            # cycle integration but it causes problems in this context where we are\n            # setting artist properties directly, so pop it off here\n            if \"facecolor\" in kws:\n                kws.pop(\"color\", None)\n\n            handles.append(artist(**kws))\n            labels.append(level)\n\n        if isinstance(ax_obj, mpl.axes.Axes):\n            ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n        else:  # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this subset and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different 
ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in 
densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif 
element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, 
alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n 
mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 145, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 147, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " kws = self._artist_kws(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 157, "name": "artist", "kind": "ref", "category": "function", "info": " handles.append(artist(**kws))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 164, "name": "add_legend", "kind": "ref", "category": "function", "info": " ax_obj.add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 171, "name": "_artist_kws", "kind": "def", "category": "function", "info": " def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n 
return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # 
-------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 175, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 194, "name": "_quantile_to_level", "kind": "def", "category": "function", "info": " def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different 
ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in 
densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif 
element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, 
alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n 
mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 204, "name": "_cmap_from_color", "kind": "def", "category": "function", "info": " def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different 
ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in 
densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif 
element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, 
alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n 
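`_quantile_to_level` is only referenced above, not defined in this excerpt; the sketch below is one plausible implementation (an assumption, not the verbatim helper) of the mass-based cut it performs: find the height below which the faintest fraction `p` of the total mass lies, so those cells can be masked out.

```python
import numpy as np

def quantile_to_level(heights, p):
    # Sort heights from largest to smallest and track cumulative mass.
    values = np.sort(np.ravel(heights))[::-1]
    normalized = np.cumsum(values) / values.sum()
    # The first level at which the visible cells hold (1 - p) of the mass.
    idx = np.searchsorted(normalized, 1 - p)
    return np.take(values, idx, mode="clip")

heights = np.random.default_rng(0).poisson(5, size=(20, 20)).astype(float)
thresh = quantile_to_level(heights, 0.05)
masked = np.ma.masked_less_equal(heights, thresh)  # as in the code above
```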
mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
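The grid bookkeeping around `pcolormesh` above is self-contained enough to demonstrate in isolation; a small sketch with dummy data (the premise that `pcolormesh` disables the grid is taken from the comment above):

```python
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.grid(True)

# Record grid visibility before pcolormesh, then restore it afterwards.
x_grid = any(line.get_visible() for line in ax.xaxis.get_gridlines())
y_grid = any(line.get_visible() for line in ax.yaxis.get_gridlines())

ax.pcolormesh(np.arange(5), np.arange(4), np.arange(12).reshape(3, 4))

if x_grid:
    ax.grid(True, axis="x")
if y_grid:
    ax.grid(True, axis="y")
```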
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
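The `sticky_edges` assignments above are what suppress matplotlib's autoscale margins below a density curve (and pin filled plots to [0, 1]). A minimal sketch with a hand-built Gaussian curve:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(-3, 3, 200)
y = np.exp(-x ** 2 / 2) / np.sqrt(2 * np.pi)

fig, ax = plt.subplots()
line, = ax.plot(x, y)
# No autoscale margin below y=0; normal margins elsewhere.
line.sticky_edges.y[:] = (0, np.inf)
ax.autoscale_view()
```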
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
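The iso-proportion contour levels above can be reproduced end to end with scipy's `gaussian_kde` standing in for the module's own KDE estimator (an assumption made for this sketch) and the mass-based level cut inlined:

```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
x, y = rng.standard_normal((2, 500))
kde = gaussian_kde([x, y])

xx, yy = np.meshgrid(np.linspace(-3, 3, 100), np.linspace(-3, 3, 100))
density = kde([xx.ravel(), yy.ravel()]).reshape(xx.shape)

# thresh=0.1 and 5 levels, as in `np.linspace(thresh, 1, levels)` above.
isoprop = np.linspace(0.1, 1, 5)
sorted_d = np.sort(density.ravel())[::-1]
cum = np.cumsum(sorted_d) / sorted_d.sum()
draw_levels = np.take(sorted_d, np.searchsorted(cum, 1 - isoprop), mode="clip")

fig, ax = plt.subplots()
ax.contour(xx, yy, density, levels=np.unique(draw_levels))
```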
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
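The ECDF conventions above (prepending `-inf` so the curve starts at 0, stepping "post" along the data axis, and pinning sticky edges to the statistic's range) are easy to see in a standalone sketch:

```python
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
obs = rng.standard_normal(100)

vals = np.concatenate([[-np.inf], np.sort(obs)])
stat = np.arange(len(vals)) / (len(vals) - 1)  # proportion of observations

fig, ax = plt.subplots()
line, = ax.plot(vals, stat, drawstyle="steps-post")
line.sticky_edges.y[:] = (0, 1)  # no autoscale margin beyond [0, 1]
```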
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 209, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, _ = husl.rgb_to_husl(r, g, b)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 215, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 216, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(colors[::-1])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 218, "name": "_default_discrete", "kind": "def", "category": "function", "info": " def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
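`_plot_single_rug` above hinges on a blended transform: x values live in data coordinates while tick heights live in axes coordinates, so the rug keeps its height regardless of the data limits. A self-contained sketch of that construction:

```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as tx
from matplotlib.collections import LineCollection

rng = np.random.default_rng(2)
vector = rng.standard_normal(50)
n, height = len(vector), 0.05

fig, ax = plt.subplots()
# x in data coordinates, y in axes coordinates (0 = bottom of the axes).
trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
xy_pairs = np.column_stack([np.repeat(vector, 2), np.tile([0, height], n)])
ax.add_collection(LineCollection(
    xy_pairs.reshape([n, 2, 2]), transform=trans, linewidth=1,
))
ax.autoscale_view(scalex=True, scaley=False)
```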
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different 
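The log-scale branch of the dodge logic above divides each bar into equal slots in log10 space, so dodged bars appear evenly sized on a log axis. The same arithmetic, extracted with toy values:

```python
import numpy as np

edges = np.array([1.0, 10.0])    # left edges in original units
widths = np.array([9.0, 90.0])   # full bar widths in original units
n, level_idx = 3, 1              # three hue levels; take the middle slot

log_min = np.log10(edges)
log_max = np.log10(edges + widths)
log_width = (log_max - log_min) / n
new_min = np.power(10, log_min + level_idx * log_width)
new_max = np.power(10, log_min + (level_idx + 1) * log_width)
print(new_min, new_max - new_min)  # dodged edges and widths
```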
ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in 
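`_compute_univariate_density` above scales each subset's density by `part_weight / whole_weight` so that under `common_norm` the curves integrate to 1 jointly rather than individually. A sketch of that scaling, with scipy's `gaussian_kde` standing in for the module's KDE estimator (an assumption) and the singular-variance guard included:

```python
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(3)
groups = {"a": rng.standard_normal(300), "b": rng.standard_normal(100) + 2}
whole_weight = sum(len(v) for v in groups.values())

grid = np.linspace(-4, 6, 200)
densities = {}
for key, obs in groups.items():
    if len(obs) < 2 or np.isclose(obs.var(), 0):
        continue  # singular: the KDE would fail, so skip this subset
    density = gaussian_kde(obs)(grid)
    densities[key] = density * len(obs) / whole_weight
```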
densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif 
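The `shrink` packing above narrows each bar around its bin center by re-deriving the widths and left edges from the original bin edges; with toy numbers:

```python
import numpy as np

edges = np.array([0.0, 1.0, 2.0, 3.0])  # histogram bin edges
shrink = 0.8

orig_widths = np.diff(edges)
widths = shrink * orig_widths                        # shrunken bar widths
lefts = edges[:-1] + (1 - shrink) / 2 * orig_widths  # recentered left edges
print(lefts, widths)
```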
element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, 
alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n 
mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 228, "name": "_resolve_multiple", "kind": "def", "category": "function", "info": " def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for cols in column_groups.values():\n\n norm_constant = curves.iloc[:, cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves.iloc[:, cols] = curves.iloc[:, cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves.iloc[:, cols] = (curves\n .iloc[:, cols]\n .div(norm_constant, axis=\"index\"))\n\n # Define where each segment starts\n baselines.iloc[:, cols] = (curves\n .iloc[:, cols]\n .shift(1, axis=1)\n .fillna(0))\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - 
new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
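The "step" element above closes the histogram hull by appending the right edge of the final bin, so the last step has a landing point; a toy illustration (edges, widths, and heights are made up):

    import numpy as np

    edges = np.array([0.0, 1.0, 2.0])     # left bin edges
    widths = np.array([1.0, 1.0, 1.0])    # bin widths
    heights = np.array([3.0, 5.0, 2.0])   # bar heights

    x = np.append(edges, edges[-1] + widths[-1])   # add closing right edge
    y = np.append(heights, heights[-1])            # repeat the final height
    # ax.fill_between(x, 0, y, step="post") would then draw the closed hull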
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
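The default-linewidth pass above converts the thinnest bin's width from data units to points; a minimal sketch of that conversion (`left_edge` and `binwidth` are hypothetical values):

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.set_xlim(0, 10)
    left_edge, binwidth = 0.0, 0.5

    # Transform two x positions one binwidth apart into display pixels,
    # then scale pixels to points (72 points per inch / figure dpi)
    pts_x, pts_y = 72 / ax.figure.dpi * abs(
        ax.transData.transform([left_edge + binwidth] * 2)
        - ax.transData.transform([left_edge] * 2)
    )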
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
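The thresholding above relies on masked arrays: cells at or below the threshold are masked, and pcolormesh leaves masked cells undrawn, which renders them transparent. A toy example:

    import numpy as np

    heights = np.array([[0.0, 2.0, 5.0],
                        [1.0, 0.0, 3.0]])   # toy bin counts
    thresh = 1.0

    # Masked cells are skipped when the mesh is drawn
    masked = np.ma.masked_less_equal(heights, thresh)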
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
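A small sketch of the grid bookkeeping around pcolormesh described above, assuming (as the embedded comment states) that pcolormesh disables the grid; the random data is only for illustration:

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.grid(True)

    # Record grid visibility before drawing, then restore it afterwards
    x_grid = any(l.get_visible() for l in ax.xaxis.get_gridlines())
    y_grid = any(l.get_visible() for l in ax.yaxis.get_gridlines())
    ax.pcolormesh(np.random.default_rng(0).random((4, 4)))
    if x_grid:
        ax.grid(True, axis="x")
    if y_grid:
        ax.grid(True, axis="y")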
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
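The sticky-edges handling above keeps autoscaling from padding a margin below the density curve; a minimal standalone sketch of the same trick:

    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    x = np.linspace(0, 1, 50)
    line, = ax.plot(x, np.sin(np.pi * x))

    # Pinning sticky edges at (0, inf) on the density axis stops
    # matplotlib from adding a margin below zero
    line.sticky_edges.y[:] = (0, np.inf)
    ax.autoscale_view()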
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
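A minimal sketch of the iso-proportion to iso-density mapping that `_quantile_to_level` performs above; `quantile_to_level` here is a standalone illustration mirroring the visible logic, not the library API:

    import numpy as np

    def quantile_to_level(density, quantiles):
        # Sort density values descending, accumulate their mass, and find
        # the density value at which each requested fraction of the total
        # mass is enclosed
        values = np.ravel(density)
        sorted_values = np.sort(values)[::-1]
        normalized_cumsum = np.cumsum(sorted_values) / values.sum()
        idx = np.searchsorted(normalized_cumsum, 1 - np.asarray(quantiles))
        return np.take(sorted_values, idx, mode="clip")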
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
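The ECDF drawing convention above pairs an x-oriented curve with a "steps-post" draw style and pins the stat axis with sticky edges; a toy example with random data:

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(0)
    x = np.sort(rng.normal(size=20))
    stat = np.arange(1, len(x) + 1) / len(x)   # cumulative proportion

    fig, ax = plt.subplots()
    # "steps-post" holds each value until the next observation
    line, = ax.plot(x, stat, drawstyle="steps-post")
    line.sticky_edges.y[:] = (0, 1)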
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 248, "name": "tolist", "kind": "ref", "category": "function", "info": " for i, keyd in enumerate(map(dict, curves.columns.tolist())):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 265, "name": "div", "kind": "ref", "category": "function", "info": " .div(norm_constant, axis=\"index\"))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 270, "name": "shift", "kind": "ref", "category": "function", "info": " .shift(1, axis=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 271, "name": "fillna", "kind": "ref", "category": "function", "info": " .fillna(0))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 281, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = curves[key].reset_index(name=\"heights\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 283, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 295, "name": "set_index", "kind": "ref", "category": "function", "info": " curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 303, "name": "_compute_univariate_density", "kind": "def", "category": "function", "info": " def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n 
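The `_plot_single_rug` body above blends data and axes coordinates so the tick height is independent of the data scale; a self-contained sketch with random toy data:

    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.transforms as tx
    from matplotlib.collections import LineCollection

    fig, ax = plt.subplots()
    vector = np.random.default_rng(0).normal(size=30)
    n, height = len(vector), 0.05

    # x positions live in data coordinates, tick height in axes coordinates
    trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
    xy_pairs = np.column_stack([
        np.repeat(vector, 2), np.tile([0, height], n)
    ])
    ax.add_collection(LineCollection(
        xy_pairs.reshape([n, 2, 2]), transform=trans
    ))
    ax.autoscale_view(scalex=True, scaley=False)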
estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in 
self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set 
initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, 
(x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 314, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 318, "name": "dropna", "kind": "ref", "category": "function", "info": " all_observations = self.comp_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 319, "name": "define_support", "kind": "ref", "category": "function", "info": " estimator.define_support(all_observations[data_variable])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 323, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 331, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 351, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(observations, weights=weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 381, "name": "plot_univariate_histogram", "kind": "def", "category": "function", "info": " def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", 
\"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n if set(self.variables) - {\"x\", \"y\"}: # Check if we'll have multiple histograms\n if common_bins:\n estimator.define_bin_params(\n all_data[self.data_variable], weights=all_weights\n )\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n observations = sub_data[self.data_variable]\n\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Do the histogram computation\n heights, edges = estimator(observations, weights=weights)\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * np.diff(edges)).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n orig_widths = np.diff(edges)\n widths = shrink * orig_widths\n edges = edges[:-1] + (1 - shrink) / 2 * orig_widths\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + 
widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = 
pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't 
follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 404, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 405, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 426, "name": "Histogram", "kind": "ref", "category": "function", "info": " estimator = Histogram(**estimate_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 430, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 435, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " estimator.define_bin_params(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 451, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 452, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 462, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 476, "name": "estimator", "kind": "ref", 
"category": "function", "info": " heights, edges = estimator(observations, weights=weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 488, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 495, "name": "from_arrays", "kind": "ref", "category": "function", "info": " index = pd.MultiIndex.from_arrays([\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 509, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " histograms, baselines = self._resolve_multiple(histograms, multiple)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 511, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, _ = self._resolve_multiple(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 519, "name": "to_frame", "kind": "ref", "category": "function", "info": " bin_vals = histograms.index.to_frame()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 524, "name": "idxmax", "kind": "ref", "category": "function", "info": " edges.max() + widths.loc[edges.idxmax()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 549, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 552, "name": "rename", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 552, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 555, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 559, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 563, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 572, "name": "plot_func", "kind": "ref", "category": "function", "info": " 
artists = plot_func(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 668, "name": "to_frame", "kind": "ref", "category": "function", "info": " h.index.to_frame() for _, h in histograms.items()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 669, "name": "reset_index", "kind": "ref", "category": "function", "info": " ]).reset_index(drop=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 670, "name": "idxmin", "kind": "ref", "category": "function", "info": " thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 678, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 680, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 684, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 688, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([left_edge + binwidth] * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 689, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([left_edge] * 2)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 726, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 737, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 741, "name": "plot_bivariate_histogram", "kind": "def", "category": "function", "info": " def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and 
norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else 
self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, 
{}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 755, "name": "Histogram", "kind": "ref", "category": "function", "info": " estimator = Histogram(**estimate_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 759, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 761, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " estimator.define_bin_params(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 772, "name": "iter_data", "kind": "ref", "category": "function", "info": " for _, sub_data in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 773, "name": "estimator", "kind": "ref", "category": "function", "info": " sub_heights, _ = estimator(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 781, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(full_heights, pthresh)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 786, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(full_heights, pmax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 798, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 804, "name": "estimator", "kind": "ref", "category": "function", "info": " heights, (x_edges, y_edges) = 
estimator(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 811, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 813, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 823, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 824, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 829, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 831, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 836, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(heights, pmax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 842, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(heights, pthresh)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 844, "name": "masked_less_equal", "kind": "ref", "category": "function", "info": " heights = np.ma.masked_less_equal(heights, thresh)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 847, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 851, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 852, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 883, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 894, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 898, "name": "plot_univariate_density", "kind": "def", "category": "function", "info": " def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if 
self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity 
information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = 
len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 920, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " plot_kws = _normalize_kwargs(plot_kws, artist)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 923, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 931, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 934, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 944, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, baselines = self._resolve_multiple(densities, multiple)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 967, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 978, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 981, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 985, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", 
"rel_fname": "seaborn/distributions.py", "line": 1019, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1029, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1033, "name": "plot_bivariate_density", "kind": "def", "category": "function", "info": " def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Get a default single color from the attribute cycle\n if self.ax is None:\n default_color = \"C0\" if color is None else color\n else:\n scout, = self.ax.plot([], color=color)\n default_color = scout.get_color()\n scout.remove()\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(default_color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [default_color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity 
information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = 
len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1051, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1056, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1061, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1065, "name": "fillna", "kind": "ref", "category": "function", "info": " min_variance = observations.var().fillna(0).min()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1078, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(*observations, weights=weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1095, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1097, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1120, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " common_levels = self._quantile_to_level(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1126, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " k: self._quantile_to_level(d, levels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1150, "name": "_cmap_from_color", 
"kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(default_color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1158, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1163, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1166, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1168, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " contour_kws[\"cmap\"] = self._cmap_from_color(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1172, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1190, "name": "contour_func", "kind": "ref", "category": "function", "info": " cset = contour_func(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1208, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1223, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1227, "name": "plot_univariate_ecdf", "kind": "def", "category": "function", "info": " def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n 
else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1229, "name": "ECDF", "kind": "ref", "category": "function", "info": " estimator = ECDF(**estimate_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1236, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1246, "name": "estimator", "kind": "ref", "category": "function", "info": " stat, vals = estimator(observations, weights=weights)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1251, "name": "_hue_map", "kind": "ref", "category": "function", "info": " artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1255, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1273, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1286, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1292, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1296, "name": "plot_rug", "kind": "def", "category": "function", "info": " def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = 
np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1298, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1300, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1317, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1319, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1322, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1326, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1330, "name": "_plot_single_rug", "kind": "def", "category": "function", "info": " def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1337, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1342, "name": "_hue_map", "kind": "ref", "category": "function", "info": " colors = self._hue_map(sub_data[\"hue\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1363, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(LineCollection(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1367, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1370, "name": "_DistributionFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1379, "name": "histplot", "kind": "def", "category": "function", "info": "def histplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Histogram computation parameters\n stat=\"count\", bins=\"auto\", binwidth=None, binrange=None,\n discrete=None, cumulative=False, common_bins=True, common_norm=True,\n # Histogram appearance parameters\n multiple=\"layer\", element=\"bars\", fill=True, shrink=1,\n # Histogram smoothing with a kernel density estimate\n kde=False, kde_kws=None, line_kws=None,\n # Bivariate histogram parameters\n thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1400, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1402, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1405, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1410, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1417, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1424, "name": 
"_default_discrete", "kind": "ref", "category": "function", "info": " discrete = p._default_discrete()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1437, "name": "plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1455, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1602, "name": "kdeplot", "kind": "def", "category": "function", "info": "def kdeplot(\n data=None, *, x=None, y=None, hue=None, weights=None,\n palette=None, hue_order=None, hue_norm=None, color=None, fill=None,\n multiple=\"layer\", common_norm=True, common_grid=False, cumulative=False,\n bw_method=\"scott\", bw_adjust=1, warn_singular=True, log_scale=None,\n levels=10, thresh=.05, gridsize=200, cut=3, clip=None,\n legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,\n **kwargs,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1690, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1692, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1695, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1700, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, allowed_types=[\"numeric\", \"datetime\"], log_scale=log_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1703, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1722, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1736, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1882, "name": "ecdfplot", "kind": "def", "category": "function", "info": "def ecdfplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Computation 
parameters\n stat=\"proportion\", complementary=False,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1896, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1898, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1901, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1912, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1915, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1929, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1994, "name": "rugplot", "kind": "def", "category": "function", "info": "def rugplot(\n data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,\n palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2052, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2054, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2056, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2061, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2064, "name": "_default_color", "kind": "ref", "category": "function", "info": " 
kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2069, "name": "plot_rug", "kind": "ref", "category": "function", "info": " p.plot_rug(height, expand_margins, legend, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2116, "name": "displot", "kind": "def", "category": "function", "info": "def displot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, row=None, col=None, weights=None,\n # Other plot parameters\n kind=\"hist\", rug=False, rug_kws=None, log_scale=None, legend=True,\n # Hue-mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Faceting parameters\n col_wrap=None, row_order=None, col_order=None,\n height=5, aspect=1, facet_kws=None,\n **kwargs,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2130, "name": "_DistributionFacetPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionFacetPlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2132, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionFacetPlotter.get_semantics(locals())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2135, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2137, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", [\"hist\", \"kde\", \"ecdf\"], kind)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2156, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2157, "name": "duplicated", "kind": "ref", "category": "function", "info": " grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2165, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2178, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(g, allowed_types=allowed_types, log_scale=log_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2198, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2206, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " estimate_kws[\"discrete\"] = p._default_discrete()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2214, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2215, "name": "plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(**hist_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2219, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2220, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(**hist_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2228, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2239, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2240, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(**kde_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2244, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2245, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(**kde_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2254, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2263, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2264, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(**ecdf_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2275, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(rug_kws, p.plot_rug, rugplot)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2279, "name": "plot_rug", "kind": "ref", "category": "function", "info": " p.plot_rug(**rug_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2283, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2284, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " x_var=p.variables.get(\"x\", g.axes.flat[0].get_xlabel()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2285, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " y_var=p.variables.get(\"y\", g.axes.flat[0].get_ylabel()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2287, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2303, "name": "rename", "kind": "ref", "category": "function", "info": " g.data = p.plot_data.rename(columns=wide_cols)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2395, "name": "_freedman_diaconis_bins", "kind": "def", "category": "function", "info": "def _freedman_diaconis_bins(a):\n \"\"\"Calculate number of hist bins using Freedman-Diaconis rule.\"\"\"\n # From https://stats.stackexchange.com/questions/798/\n a = np.asarray(a)\n if len(a) < 2:\n return 1\n iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n h = 2 * iqr / (len(a) ** (1 / 3))\n # fall back to sqrt(a) bins if iqr is 0\n if h == 0:\n return int(np.sqrt(a.size))\n else:\n return int(np.ceil((a.max() - a.min()) / h))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2401, "name": "reduce", "kind": "ref", "category": "function", "info": " iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2410, "name": "distplot", "kind": "def", "category": "function", "info": "def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,\n hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,\n color=None, vertical=False, norm_hist=False, axlabel=None,\n label=None, ax=None, x=None):\n \"\"\"\n DEPRECATED\n\n 
This function has been deprecated and will be removed in seaborn v0.14.0.\n It has been replaced by :func:`histplot` and :func:`displot`, two functions\n with a modern API and many more capabilities.\n\n For a guide to updating, please see this notebook:\n\n https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n\n \"\"\"\n\n if kde and not hist:\n axes_level_suggestion = (\n \"`kdeplot` (an axes-level function for kernel density plots)\"\n )\n else:\n axes_level_suggestion = (\n \"`histplot` (an axes-level function for histograms)\"\n )\n\n msg = textwrap.dedent(f\"\"\"\n\n `distplot` is a deprecated function and will be removed in seaborn v0.14.0.\n\n Please adapt your code to use either `displot` (a figure-level function with\n similar flexibility) or {axes_level_suggestion}.\n\n For a guide to updating your code to use the new functions, please see\n https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n \"\"\")\n warnings.warn(msg, UserWarning, stacklevel=2)\n\n if ax is None:\n ax = plt.gca()\n\n # Intelligently label the support axis\n label_ax = bool(axlabel)\n if axlabel is None and hasattr(a, \"name\"):\n axlabel = a.name\n if axlabel is not None:\n label_ax = True\n\n # Support new-style API\n if x is not None:\n a = x\n\n # Make a a 1-d float array\n a = np.asarray(a, float)\n if a.ndim > 1:\n a = a.squeeze()\n\n # Drop null values from array\n a = remove_na(a)\n\n # Decide if the hist is normed\n norm_hist = norm_hist or kde or (fit is not None)\n\n # Handle dictionary defaults\n hist_kws = {} if hist_kws is None else hist_kws.copy()\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n rug_kws = {} if rug_kws is None else rug_kws.copy()\n fit_kws = {} if fit_kws is None else fit_kws.copy()\n\n # Get the color from the current color cycle\n if color is None:\n if vertical:\n line, = ax.plot(0, a.mean())\n else:\n line, = ax.plot(a.mean(), 0)\n color = line.get_color()\n line.remove()\n\n # Plug the label into the right kwarg dictionary\n if label is not None:\n if hist:\n hist_kws[\"label\"] = label\n elif kde:\n kde_kws[\"label\"] = label\n elif rug:\n rug_kws[\"label\"] = label\n elif fit:\n fit_kws[\"label\"] = label\n\n if hist:\n if bins is None:\n bins = min(_freedman_diaconis_bins(a), 50)\n hist_kws.setdefault(\"alpha\", 0.4)\n hist_kws.setdefault(\"density\", norm_hist)\n\n orientation = \"horizontal\" if vertical else \"vertical\"\n hist_color = hist_kws.pop(\"color\", color)\n ax.hist(a, bins, orientation=orientation,\n color=hist_color, **hist_kws)\n if hist_color != color:\n hist_kws[\"color\"] = hist_color\n\n axis = \"y\" if vertical else \"x\"\n\n if kde:\n kde_color = kde_kws.pop(\"color\", color)\n kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n if kde_color != color:\n kde_kws[\"color\"] = kde_color\n\n if rug:\n rug_color = rug_kws.pop(\"color\", color)\n rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n if rug_color != color:\n rug_kws[\"color\"] = rug_color\n\n if fit is not None:\n\n def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n 
ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2468, "name": "remove_na", "kind": "ref", "category": "function", "info": " a = remove_na(a)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2501, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " bins = min(_freedman_diaconis_bins(a), 50)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2516, "name": "kdeplot", "kind": "ref", "category": "function", "info": " kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2522, "name": "rugplot", "kind": "ref", "category": "function", "info": " rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2528, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2529, "name": "pdf", "kind": "ref", "category": "function", "info": " return fit.pdf(x, *params)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2535, "name": "gaussian_kde", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2535, "name": "scotts_factor", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2536, "name": "_kde_support", "kind": "ref", "category": "function", "info": " x = _kde_support(a, bw, gridsize, cut, clip)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2537, "name": "fit", "kind": "ref", "category": "function", "info": " params = fit.fit(a)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2538, "name": "pdf", "kind": "ref", "category": "function", "info": " y = pdf(x)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2547, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(axlabel)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2549, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(axlabel)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 72, "name": "user_cache_dir", "kind": "def", "category": "function", "info": "def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):\n r\"\"\"Return full path to the user-specific cache dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"opinion\" (boolean) can be False to disable the appending of\n \"Cache\" to the base app data dir for Windows. See\n discussion below.\n\n Typical user cache directories are:\n Mac OS X: ~/Library/Caches/\n Unix: ~/.cache/ (XDG default)\n Win XP: C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\\Cache\n Vista: C:\\Users\\\\AppData\\Local\\\\\\Cache\n\n On Windows the only suggestion in the MSDN docs is that local settings go in\n the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming\n app data dir (the default returned by `user_data_dir` above). Apps typically\n put cache data somewhere *under* the given dir here. 
Some examples:\n ...\\Mozilla\\Firefox\\Profiles\\\\Cache\n ...\\Acme\\SuperApp\\Cache\\1.0\n OPINION: This function appends \"Cache\" to the `CSIDL_LOCAL_APPDATA` value.\n This can be disabled with the `opinion=False` option.\n \"\"\"\n if system == \"win32\":\n if appauthor is None:\n appauthor = appname\n path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n if opinion:\n path = os.path.join(path, \"Cache\")\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Caches')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 108, "name": "normpath", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 108, "name": "_get_win_folder", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 117, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('~/Library/Caches')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 121, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 131, "name": "_get_win_folder_from_registry", "kind": "def", "category": "function", "info": "def _get_win_folder_from_registry(csidl_name):\n \"\"\"This is a fallback technique at best. 
I'm not sure if using the\n registry for this guarantees us the correct answer for all CSIDL_*\n names.\n \"\"\"\n import winreg as _winreg\n\n shell_folder_name = {\n \"CSIDL_APPDATA\": \"AppData\",\n \"CSIDL_COMMON_APPDATA\": \"Common AppData\",\n \"CSIDL_LOCAL_APPDATA\": \"Local AppData\",\n }[csidl_name]\n\n key = _winreg.OpenKey(\n _winreg.HKEY_CURRENT_USER,\n r\"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\"\n )\n dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n return dir\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 144, "name": "OpenKey", "kind": "ref", "category": "function", "info": " key = _winreg.OpenKey(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 148, "name": "QueryValueEx", "kind": "ref", "category": "function", "info": " dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 152, "name": "_get_win_folder_with_pywin32", "kind": "def", "category": "function", "info": "def _get_win_folder_with_pywin32(csidl_name):\n from win32com.shell import shellcon, shell\n dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n # Try to make this a unicode path because SHGetFolderPath does\n # not return unicode strings when there is unicode data in the\n # path.\n try:\n dir = unicode(dir)\n\n # Downgrade to short path name if have highbit chars. See\n # <http://bugs.activestate.com/show_bug.cgi?id=85099>.\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n try:\n import win32api\n dir = win32api.GetShortPathName(dir)\n except ImportError:\n pass\n except UnicodeError:\n pass\n return dir\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 154, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 159, "name": "unicode", "kind": "ref", "category": "function", "info": " dir = unicode(dir)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 171, "name": "GetShortPathName", "kind": "ref", "category": "function", "info": " dir = win32api.GetShortPathName(dir)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 179, "name": "_get_win_folder_with_ctypes", "kind": "def", "category": "function", "info": "def _get_win_folder_with_ctypes(csidl_name):\n import ctypes\n\n csidl_const = {\n \"CSIDL_APPDATA\": 26,\n \"CSIDL_COMMON_APPDATA\": 35,\n \"CSIDL_LOCAL_APPDATA\": 28,\n }[csidl_name]\n\n buf = ctypes.create_unicode_buffer(1024)\n ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n\n # Downgrade to short path name if have highbit chars. 
See\n # <http://bugs.activestate.com/show_bug.cgi?id=85099>.\n has_high_char = False\n for c in buf:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf2 = ctypes.create_unicode_buffer(1024)\n if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n buf = buf2\n\n return buf.value\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 189, "name": "SHGetFolderPathW", "kind": "ref", "category": "function", "info": " ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 200, "name": "GetShortPathNameW", "kind": "ref", "category": "function", "info": " if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 205, "name": "_get_win_folder_with_jna", "kind": "def", "category": "function", "info": "def _get_win_folder_with_jna(csidl_name):\n import array\n from com.sun import jna\n from com.sun.jna.platform import win32\n\n buf_size = win32.WinDef.MAX_PATH * 2\n buf = array.zeros('c', buf_size)\n shell = win32.Shell32.INSTANCE\n shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n # Downgrade to short path name if have highbit chars. See\n # <http://bugs.activestate.com/show_bug.cgi?id=85099>.\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf = array.zeros('c', buf_size)\n kernel = win32.Kernel32.INSTANCE\n if kernel.GetShortPathName(dir, buf, buf_size):\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n return dir\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 211, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 213, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 214, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 214, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 224, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 226, "name": "GetShortPathName", "kind": "ref", "category": 
"function", "info": " if kernel.GetShortPathName(dir, buf, buf_size):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 227, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 227, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 39, "name": "strip_blank_lines", "kind": "def", "category": "function", "info": "def strip_blank_lines(l):\n \"Remove leading and trailing blank lines from a list of lines\"\n while l and not l[0].strip():\n del l[0]\n while l and not l[-1].strip():\n del l[-1]\n return l\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 48, "name": "Reader", "kind": "def", "category": "class", "info": "__init__\t__getitem__\treset\tread\tseek_next_non_empty_line\teof\tread_to_condition\tread_to_next_empty_line\tread_to_next_unindented_line\tpeek\tis_empty"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 65, "name": "reset", "kind": "ref", "category": "function", "info": " self.reset()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 70, "name": "reset", "kind": "def", "category": "function", "info": " def reset(self):\n self._l = 0 # current line nr\n\n def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 73, "name": "read", "kind": "def", "category": "function", "info": " def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n 
start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 74, "name": "eof", "kind": "ref", "category": "function", "info": " if not self.eof():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 81, "name": "seek_next_non_empty_line", "kind": "def", "category": "function", "info": " def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 88, "name": "eof", "kind": "def", "category": "function", "info": " def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 91, "name": "read_to_condition", "kind": "def", "category": "function", "info": " def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return 
self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 94, "name": "condition_func", "kind": "ref", "category": "function", "info": " if condition_func(line):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 97, "name": "eof", "kind": "ref", "category": "function", "info": " if self.eof():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 101, "name": "read_to_next_empty_line", "kind": "def", "category": "function", "info": " def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 102, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self.seek_next_non_empty_line()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 104, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 107, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_empty)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 109, "name": "read_to_next_unindented_line", "kind": "def", "category": "function", "info": " def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n 
return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 110, "name": "is_unindented", "kind": "def", "category": "function", "info": " def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 112, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_unindented)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 114, "name": "peek", "kind": "def", "category": "function", "info": " def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 120, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 124, "name": "ParseError", "kind": "def", "category": "class", "info": "__str__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 135, "name": "NumpyDocString", "kind": "def", "category": "class", "info": "__init__\t__getitem__\t__setitem__\t__iter__\t__len__\t_is_at_section\t_strip\t_read_to_next_section\t_read_sections\t_parse_param_list\t_parse_see_also\t_parse_index\t_parse_summary\t_parse\t_error_location\t_str_header\t_str_indent\t_str_signature\t_str_summary\t_str_extended_summary\t_str_param_list\t_str_section\t_str_see_also\t_str_index\t__str__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 167, "name": "Reader", "kind": "ref", "category": "function", "info": " self._doc = Reader(docstring)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 171, "name": "_parse", "kind": "ref", "category": "function", "info": " self._parse()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 181, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"Unknown section {key}\", error=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 191, "name": "_is_at_section", "kind": "def", "category": "function", "info": " def _is_at_section(self):\n 
self._doc.seek_next_non_empty_line()\n\n if self._doc.eof():\n return False\n\n l1 = self._doc.peek().strip() # e.g. Parameters\n\n if l1.startswith('.. index::'):\n return True\n\n l2 = self._doc.peek(1).strip() # ---------- or ==========\n return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))\n\n def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if 
line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 192, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self._doc.seek_next_non_empty_line()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 194, "name": "eof", "kind": "ref", "category": "function", "info": " if self._doc.eof():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 197, "name": "peek", "kind": "ref", "category": "function", "info": " l1 = self._doc.peek().strip() # e.g. Parameters\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 202, "name": "peek", "kind": "ref", "category": "function", "info": " l2 = self._doc.peek(1).strip() # ---------- or ==========\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 205, "name": "_strip", "kind": "def", "category": "function", "info": " def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ 
(COMMA | PERIOD)? SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 218, "name": "_read_to_next_section", "kind": "def", "category": "function", "info": " def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? 
SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 229, "name": "_read_sections", "kind": "def", "category": "function", "info": " def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? 
SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 230, "name": "eof", "kind": "ref", "category": "function", "info": " while not self._doc.eof():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 231, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " data = self._read_to_next_section()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 239, "name": "_strip", "kind": "ref", "category": "function", "info": " yield name, self._strip(data[2:])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 241, "name": "_parse_param_list", "kind": "def", "category": "function", "info": " def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? 
SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 242, "name": "Reader", "kind": "ref", "category": "function", "info": " r = Reader(content)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 244, "name": "eof", "kind": "ref", "category": "function", "info": " while not r.eof():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 245, "name": "read", "kind": "ref", "category": "function", "info": " header = r.read().strip()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 254, "name": "read_to_next_unindented_line", "kind": "ref", "category": "function", "info": " desc = r.read_to_next_unindented_line()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 255, "name": "dedent_lines", "kind": "ref", "category": "function", "info": " desc = dedent_lines(desc)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 256, "name": "strip_blank_lines", "kind": "ref", "category": "function", "info": " desc = strip_blank_lines(desc)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 298, "name": "_parse_see_also", "kind": "def", "category": "function", "info": " def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if 
not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 309, "name": "parse_item_name", "kind": "def", "category": "function", "info": " def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 313, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{text} is not a item name\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 314, "name": "group", "kind": "ref", "category": "function", "info": " role = m.group('role')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 315, "name": "group", "kind": "ref", "category": "function", "info": " name = m.group('name') if role else m.group('name2')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 315, "name": "group", "kind": "ref", "category": "function", "info": " name = m.group('name') if role else m.group('name2')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 316, "name": "end", "kind": "ref", "category": "function", "info": " return name, role, m.end()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 326, "name": "group", "kind": "ref", "category": "function", "info": " description = line_match.group('desc')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 327, "name": "group", "kind": "ref", "category": "function", "info": " if line_match.group('trailing') and description:\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 328, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 330, "name": "end", "kind": "ref", "category": "function", "info": " 'line \"%s\"' % (line_match.end('trailing'), line),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 336, "name": "group", "kind": "ref", "category": "function", "info": " text = line_match.group('allfuncs')\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 340, "name": "parse_item_name", "kind": "ref", "category": "function", "info": " name, role, match_end = parse_item_name(text)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 348, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{line} is not a item name\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 351, "name": "_parse_index", "kind": "def", "category": "function", "info": " def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 357, "name": "strip_each_in", "kind": "def", "category": "function", "info": " def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 363, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out['default'] = strip_each_in(section[1].split(','))[0]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 367, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out[line[1]] = strip_each_in(line[2].split(','))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 370, "name": "_parse_summary", "kind": "def", "category": "function", "info": " def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 372, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if self._is_at_section():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 377, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " summary = self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 382, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 389, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 390, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " self['Extended Summary'] = self._read_to_next_section()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 392, "name": "_parse", "kind": "def", "category": "function", "info": " def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. 
Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 393, "name": "reset", "kind": "ref", "category": "function", "info": " self._doc.reset()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 394, "name": "_parse_summary", "kind": "ref", "category": "function", "info": " self._parse_summary()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 396, "name": "_read_sections", "kind": "ref", "category": "function", "info": " sections = list(self._read_sections())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 414, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"The section {section} appears twice\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 418, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(content)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 420, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 423, "name": "_parse_index", "kind": "ref", "category": "function", "info": " self['index'] = self._parse_index(section, content)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 425, "name": "_parse_see_also", "kind": "ref", "category": "function", "info": " self['See Also'] = self._parse_see_also(content)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 429, "name": "_error_location", "kind": "def", "category": "function", "info": " def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} 
in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 444, "name": "_str_header", "kind": "def", "category": "function", "info": " def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 447, "name": "_str_indent", "kind": "def", "category": "function", "info": " def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 453, "name": "_str_signature", "kind": "def", "category": "function", "info": " def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 459, "name": "_str_summary", "kind": "def", "category": "function", "info": " def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 465, "name": "_str_extended_summary", "kind": "def", "category": "function", "info": " def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 471, "name": "_str_param_list", "kind": "def", "category": "function", "info": " def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 474, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 483, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent(param.desc)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 487, "name": "_str_section", "kind": "def", "category": "function", "info": " def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 490, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 495, "name": "_str_see_also", "kind": "def", "category": "function", "info": " def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 499, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(\"See Also\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 516, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([' '.join(desc)])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 520, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([self.empty_description])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 527, "name": "_str_index", "kind": "def", "category": "function", "info": " def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 547, "name": "_str_signature", "kind": "ref", "category": "function", "info": " out += self._str_signature()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 548, "name": "_str_summary", "kind": "ref", "category": "function", "info": " out += self._str_summary()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 549, "name": "_str_extended_summary", "kind": "ref", "category": "function", "info": " out += self._str_extended_summary()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 552, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 553, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section('Warnings')\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 554, "name": "_str_see_also", "kind": "ref", "category": "function", "info": " out += self._str_see_also(func_role)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 556, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section(s)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 558, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 559, "name": "_str_index", "kind": "ref", "category": "function", "info": " out += self._str_index()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 571, "name": "dedent_lines", "kind": "def", "category": "function", "info": "def dedent_lines(lines):\n \"\"\"Deindent a list of lines maximally\"\"\"\n return textwrap.dedent(\"\\n\".join(lines)).split(\"\\n\")\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 576, "name": "header", "kind": "def", "category": "function", "info": "def header(text, style='-'):\n return text + '\\n' + style*len(text) + '\\n'\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 580, "name": "FunctionDoc", "kind": "def", "category": "class", "info": "__init__\tget_func\t__str__"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 592, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 608, "name": "get_func", "kind": "def", "category": "function", "info": " def get_func(self):\n func_name = getattr(self._f, '__name__', self.__class__.__name__)\n if inspect.isclass(self._f):\n func = getattr(self._f, '__call__', self._f.__init__)\n else:\n func = self._f\n return func, func_name\n\n def __str__(self):\n out = ''\n\n func, func_name = self.get_func()\n\n roles = {'func': 'function',\n 'meth': 'method'}\n\n if self._role:\n if self._role not in roles:\n print(f\"Warning: invalid role {self._role}\")\n out += f\".. {roles.get(self._role, '')}:: {func_name}\\n \\n\\n\"\n\n out += super().__str__(func_role=self._role)\n return out\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 619, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 633, "name": "ClassDoc", "kind": "def", "category": "class", "info": "__init__\tmethods\tproperties\t_is_show_member"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 668, "name": "splitlines_x", "kind": "def", "category": "function", "info": " def splitlines_x(s):\n if not s:\n return []\n else:\n return s.splitlines()\n for field, items in [('Methods', self.methods),\n ('Attributes', self.properties)]:\n if not self[field]:\n doc_list = []\n for name in sorted(items):\n if (name in _exclude or\n (_members and name not in _members)):\n continue\n try:\n doc_item = pydoc.getdoc(getattr(self._cls, name))\n doc_list.append(\n Parameter(name, '', splitlines_x(doc_item)))\n except AttributeError:\n pass # method doesn't exist\n self[field] = doc_list\n\n @property\n def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 684, "name": "splitlines_x", "kind": "ref", "category": "function", "info": " Parameter(name, '', splitlines_x(doc_item)))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 690, "name": "methods", "kind": "def", "category": "function", "info": " def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 697, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 700, "name": "properties", "kind": "def", "category": "function", "info": " def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 707, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 709, "name": "_is_show_member", "kind": "def", "category": "function", "info": " def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 30, "name": "husl_to_rgb", "kind": "def", "category": "function", "info": "def husl_to_rgb(h, s, l):\n return lch_to_rgb(*husl_to_lch([h, s, l]))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "lch_to_rgb", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "husl_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 34, "name": "husl_to_hex", "kind": "def", "category": "function", "info": "def husl_to_hex(h, s, l):\n return rgb_to_hex(husl_to_rgb(h, s, l))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 38, "name": "rgb_to_husl", "kind": "def", "category": "function", "info": "def rgb_to_husl(r, g, b):\n return lch_to_husl(rgb_to_lch(r, g, b))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "lch_to_husl", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 42, "name": "hex_to_husl", "kind": "def", "category": "function", "info": "def hex_to_husl(hex):\n return rgb_to_husl(*hex_to_rgb(hex))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 46, "name": "huslp_to_rgb", "kind": "def", "category": "function", "info": "def huslp_to_rgb(h, s, l):\n return lch_to_rgb(*huslp_to_lch([h, s, l]))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "lch_to_rgb", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "huslp_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 50, "name": "huslp_to_hex", "kind": "def", "category": "function", "info": "def huslp_to_hex(h, s, l):\n return rgb_to_hex(huslp_to_rgb(h, s, l))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "huslp_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 54, "name": "rgb_to_huslp", "kind": "def", "category": "function", "info": "def rgb_to_huslp(r, g, b):\n return lch_to_huslp(rgb_to_lch(r, g, b))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "lch_to_huslp", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 58, "name": "hex_to_huslp", "kind": "def", "category": "function", "info": "def hex_to_huslp(hex):\n return rgb_to_huslp(*hex_to_rgb(hex))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "rgb_to_huslp", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 62, "name": "lch_to_rgb", "kind": "def", "category": "function", "info": "def lch_to_rgb(l, c, h):\n return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "xyz_to_rgb", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "luv_to_xyz", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "lch_to_luv", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 66, "name": "rgb_to_lch", "kind": "def", "category": "function", "info": "def rgb_to_lch(r, g, b):\n return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "luv_to_lch", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "xyz_to_luv", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "rgb_to_xyz", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 70, "name": "max_chroma", "kind": "def", "category": "function", "info": "def max_chroma(L, H):\n hrad = math.radians(H)\n sinH = (math.sin(hrad))\n cosH = (math.cos(hrad))\n sub1 = (math.pow(L + 16, 3.0) / 1560896.0)\n sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)\n result = float(\"inf\")\n for row in m:\n m1 = row[0]\n m2 = row[1]\n m3 = row[2]\n top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)\n rbottom = (0.86330 * m3 - 0.17266 * m2)\n lbottom = (0.12949 * m3 - 0.38848 * m1)\n bottom = (rbottom * sinH + lbottom * cosH) * sub2\n\n for t in (0.0, 1.0):\n C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))\n if C > 0.0 and C < result:\n result = C\n return result\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 93, "name": "_hrad_extremum", "kind": "def", "category": "function", "info": "def _hrad_extremum(L):\n lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0\n rhs = 1107.0 / 125000.0\n sub = lhs if lhs > rhs else 10.0 * L / 9033.0\n chroma = float(\"inf\")\n result = None\n for row in m:\n for limit in (0.0, 1.0):\n [m1, m2, m3] = row\n top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit\n bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub\n hrad = math.atan2(top, bottom)\n # This is a math hack to deal with tan quadrants, I'm too lazy to figure\n # out how to do this properly\n if limit == 0.0:\n hrad += math.pi\n test = max_chroma(L, math.degrees(hrad))\n if test < chroma:\n chroma = test\n result = hrad\n return result\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 109, "name": "max_chroma", "kind": "ref", "category": "function", "info": " test = max_chroma(L, math.degrees(hrad))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 116, "name": "max_chroma_pastel", "kind": "def", "category": "function", "info": "def max_chroma_pastel(L):\n H = math.degrees(_hrad_extremum(L))\n return max_chroma(L, H)\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 117, "name": "_hrad_extremum", "kind": "ref", "category": "function", "info": " H = math.degrees(_hrad_extremum(L))\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 118, "name": "max_chroma", "kind": "ref", "category": "function", "info": " return max_chroma(L, H)\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 121, "name": "dot_product", "kind": "def", "category": "function", "info": "def dot_product(a, b):\n return sum(map(operator.mul, a, b))\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 125, "name": "f", "kind": "def", "category": "function", "info": "def f(t):\n if t > lab_e:\n return (math.pow(t, 1.0 / 3.0))\n else:\n return (7.787 * t + 16.0 / 116.0)\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 132, "name": "f_inv", "kind": "def", "category": "function", "info": "def f_inv(t):\n if math.pow(t, 3.0) > lab_e:\n return (math.pow(t, 3.0))\n else:\n return (116.0 * t - 16.0) / lab_k\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 139, "name": "from_linear", "kind": "def", "category": "function", "info": "def from_linear(c):\n if c <= 0.0031308:\n return 12.92 * c\n else:\n return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 146, "name": "to_linear", "kind": "def", "category": "function", "info": "def to_linear(c):\n a = 0.055\n\n if c > 0.04045:\n return (math.pow((c + a) / (1.0 + a), 2.4))\n else:\n return (c / 12.92)\n\n\n"},
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 155, "name": "rgb_prepare", "kind": "def", "category": "function", "info": "def rgb_prepare(triple):\n ret = []\n for ch in triple:\n ch = round(ch, 3)\n\n if ch < -0.0001 or ch > 1.0001:\n raise Exception(f\"Illegal RGB value {ch:f}\")\n\n if ch < 0:\n ch = 0\n if ch > 1:\n ch = 1\n\n # Fix for Python 3 which by default rounds 4.5 down to 4.0\n # instead of Python 2 which is rounded to 5.0 which caused\n # a couple off by one errors in the tests. 
Tests now all pass\n # in Python 2 and Python 3\n ret.append(int(round(ch * 255 + 0.001, 0)))\n\n return ret\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 177, "name": "hex_to_rgb", "kind": "def", "category": "function", "info": "def hex_to_rgb(hex):\n if hex.startswith('#'):\n hex = hex[1:]\n r = int(hex[0:2], 16) / 255.0\n g = int(hex[2:4], 16) / 255.0\n b = int(hex[4:6], 16) / 255.0\n return [r, g, b]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 186, "name": "rgb_to_hex", "kind": "def", "category": "function", "info": "def rgb_to_hex(triple):\n [r, g, b] = triple\n return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 188, "name": "rgb_prepare", "kind": "ref", "category": "function", "info": " return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 191, "name": "xyz_to_rgb", "kind": "def", "category": "function", "info": "def xyz_to_rgb(triple):\n xyz = map(lambda row: dot_product(row, triple), m)\n return list(map(from_linear, xyz))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 192, "name": "dot_product", "kind": "ref", "category": "function", "info": " xyz = map(lambda row: dot_product(row, triple), m)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 196, "name": "rgb_to_xyz", "kind": "def", "category": "function", "info": "def rgb_to_xyz(triple):\n rgbl = list(map(to_linear, triple))\n return list(map(lambda row: dot_product(row, rgbl), m_inv))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 198, "name": "dot_product", "kind": "ref", "category": "function", "info": " return list(map(lambda row: dot_product(row, rgbl), m_inv))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 201, "name": "xyz_to_luv", "kind": "def", "category": "function", "info": "def xyz_to_luv(triple):\n X, Y, Z = triple\n\n if X == Y == Z == 0.0:\n return [0.0, 0.0, 0.0]\n\n varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))\n varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))\n L = 116.0 * f(Y / refY) - 16.0\n\n # Black will create a divide-by-zero error\n if L == 0.0:\n return [0.0, 0.0, 0.0]\n\n U = 13.0 * L * (varU - refU)\n V = 13.0 * L * (varV - refV)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 209, "name": "f", "kind": "ref", "category": "function", "info": " L = 116.0 * f(Y / refY) - 16.0\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 221, "name": "luv_to_xyz", "kind": "def", "category": "function", "info": "def luv_to_xyz(triple):\n L, U, V = triple\n\n if L == 0:\n return [0.0, 
0.0, 0.0]\n\n varY = f_inv((L + 16.0) / 116.0)\n varU = U / (13.0 * L) + refU\n varV = V / (13.0 * L) + refV\n Y = varY * refY\n X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)\n Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)\n\n return [X, Y, Z]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 227, "name": "f_inv", "kind": "ref", "category": "function", "info": " varY = f_inv((L + 16.0) / 116.0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 237, "name": "luv_to_lch", "kind": "def", "category": "function", "info": "def luv_to_lch(triple):\n L, U, V = triple\n\n C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))\n hrad = (math.atan2(V, U))\n H = math.degrees(hrad)\n if H < 0.0:\n H = 360.0 + H\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 249, "name": "lch_to_luv", "kind": "def", "category": "function", "info": "def lch_to_luv(triple):\n L, C, H = triple\n\n Hrad = math.radians(H)\n U = (math.cos(Hrad) * C)\n V = (math.sin(Hrad) * C)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 259, "name": "husl_to_lch", "kind": "def", "category": "function", "info": "def husl_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma(L, H)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 267, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 273, "name": "lch_to_husl", "kind": "def", "category": "function", "info": "def lch_to_husl(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma(L, H)\n S = C / mx * 100.0\n\n return [H, S, L]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 281, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 287, "name": "huslp_to_lch", "kind": "def", "category": "function", "info": "def huslp_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma_pastel(L)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 295, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 301, "name": 
"lch_to_huslp", "kind": "def", "category": "function", "info": "def lch_to_huslp(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma_pastel(L)\n S = C / mx * 100.0\n\n return [H, S, L]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 309, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 81, "name": "gaussian_kde", "kind": "def", "category": "class", "info": "__init__\tevaluate\tscotts_factor\tsilverman_factor\tset_bandwidth\t_compute_covariance\tpdf\tweights\tneff"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 202, "name": "astype", "kind": "ref", "category": "function", "info": " self._weights = atleast_1d(weights).astype(float)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 210, "name": "set_bandwidth", "kind": "ref", "category": "function", "info": " self.set_bandwidth(bw_method=bw_method)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 212, "name": "evaluate", "kind": "def", "category": "function", "info": " def evaluate(self, points):\n \"\"\"Evaluate the estimated pdf on a set of points.\n\n Parameters\n ----------\n points : (# of dimensions, # of points)-array\n Alternatively, a (# of dimensions,) vector can be passed in and\n treated as a single point.\n\n Returns\n -------\n values : (# of points,)-array\n The values at each point.\n\n Raises\n ------\n ValueError : if the dimensionality of the input points is different than\n the dimensionality of the KDE.\n\n \"\"\"\n points = atleast_2d(asarray(points))\n\n d, m = points.shape\n if d != self.d:\n if d == 1 and m == self.d:\n # points was passed in as a row vector\n points = reshape(points, (self.d, 1))\n m = 1\n else:\n msg = f\"points have dimension {d}, dataset has dimension {self.d}\"\n raise ValueError(msg)\n\n output_dtype = np.common_type(self.covariance, points)\n result = zeros((m,), dtype=output_dtype)\n\n whitening = linalg.cholesky(self.inv_cov)\n scaled_dataset = dot(whitening, self.dataset)\n scaled_points = dot(whitening, points)\n\n if m >= self.n:\n # there are more points than data, so loop over data\n for i in range(self.n):\n diff = scaled_dataset[:, i, newaxis] - scaled_points\n energy = sum(diff * diff, axis=0) / 2.0\n result += self.weights[i]*exp(-energy)\n else:\n # loop over points\n for i in range(m):\n diff = scaled_dataset - scaled_points[:, i, newaxis]\n energy = sum(diff * diff, axis=0) / 2.0\n result[i] = sum(exp(-energy)*self.weights, axis=0)\n\n result = result / self._norm_factor\n\n return result\n\n __call__ = evaluate\n\n def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be 
overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 270, "name": "scotts_factor", "kind": "def", "category": "function", "info": " def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 280, "name": "silverman_factor", "kind": "def", "category": "function", "info": " def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 298, "name": "set_bandwidth", "kind": "def", "category": "function", "info": " def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 330, "name": "_bw_method", "kind": "ref", "category": "function", "info": " self.covariance_factor = lambda: self._bw_method(self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 336, "name": "_compute_covariance", "kind": "ref", "category": "function", "info": " self._compute_covariance()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 338, "name": "_compute_covariance", "kind": "def", "category": "function", "info": " def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 342, "name": "covariance_factor", "kind": "ref", "category": "function", "info": " self.factor = self.covariance_factor()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 354, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 364, "name": "evaluate", "kind": "ref", "category": "function", "info": " return self.evaluate(x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 367, "name": "weights", "kind": "def", "category": "function", "info": " def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 375, "name": "neff", "kind": "def", "category": "function", "info": " def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 33, "name": "InfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 58, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> \"NegativeInfinityType\":\n return NegativeInfinity\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 62, "name": "InfinityType", "kind": "ref", "category": "function", "info": "Infinity = InfinityType()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 65, "name": "NegativeInfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 90, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> InfinityType:\n return Infinity\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 94, "name": "NegativeInfinityType", "kind": "ref", "category": "function", "info": "NegativeInfinity = NegativeInfinityType()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 127, "name": "InvalidVersion", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": 
"seaborn/external/version.py", "line": 133, "name": "_BaseVersion", "kind": "def", "category": "class", "info": "__hash__\t__lt__\t__le__\t__eq__\t__ge__\t__gt__\t__ne__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 213, "name": "Version", "kind": "def", "category": "class", "info": "__init__\t__repr__\t__str__\tepoch\trelease\tpre\tpost\tdev\tlocal\tpublic\tbase_version\tis_prerelease\tis_postrelease\tis_devrelease\tmajor\tminor\tmicro"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 222, "name": "InvalidVersion", "kind": "ref", "category": "function", "info": " raise InvalidVersion(f\"Invalid version: '{version}'\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 225, "name": "_Version", "kind": "ref", "category": "function", "info": " self._version = _Version(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 227, "name": "group", "kind": "ref", "category": "function", "info": " release=tuple(int(i) for i in match.group(\"release\").split(\".\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 229, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " post=_parse_letter_version(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "_parse_local_version", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "group", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 237, "name": "_cmpkey", "kind": "ref", "category": "function", "info": " self._key = _cmpkey(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 278, "name": "epoch", "kind": "def", "category": "function", "info": " def epoch(self) -> int:\n _epoch: int = self._version.epoch\n return _epoch\n\n @property\n def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] 
= self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 283, "name": "release", "kind": "def", "category": "function", "info": " def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] = self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 288, "name": "pre", "kind": "def", "category": "function", "info": " def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if 
self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 293, "name": "post", "kind": "def", "category": "function", "info": " def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 297, "name": "dev", "kind": "def", "category": "function", "info": " def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def 
is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 301, "name": "local", "kind": "def", "category": "function", "info": " def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 308, "name": "public", "kind": "def", "category": "function", "info": " def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 312, "name": "base_version", "kind": "def", "category": "function", "info": " def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if 
len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 325, "name": "is_prerelease", "kind": "def", "category": "function", "info": " def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 329, "name": "is_postrelease", "kind": "def", "category": "function", "info": " def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 333, "name": "is_devrelease", "kind": "def", "category": "function", "info": " def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 337, "name": "major", "kind": "def", "category": "function", "info": " def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 341, "name": "minor", "kind": "def", "category": "function", "info": " def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 345, "name": "micro", "kind": "def", "category": "function", "info": " def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 349, "name": "_parse_letter_version", "kind": "def", "category": "function", "info": "def _parse_letter_version(\n letter: str, number: Union[str, bytes, SupportsInt]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 388, "name": "_parse_local_version", "kind": "def", "category": "function", "info": "def _parse_local_version(local: str) -> Optional[LocalType]:\n \"\"\"\n Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").\n \"\"\"\n if local is not None:\n return tuple(\n part.lower() if not part.isdigit() else int(part)\n for part in _local_version_separators.split(local)\n )\n return None\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 400, "name": "_cmpkey", "kind": "def", "category": "function", "info": "def _cmpkey(\n epoch: int,\n release: Tuple[int, ...],\n pre: Optional[Tuple[str, int]],\n post: Optional[Tuple[str, int]],\n dev: Optional[Tuple[str, int]],\n local: Optional[Tuple[SubLocalType]],\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 30, "name": "_index_to_label", "kind": "def", "category": "function", "info": "def _index_to_label(index):\n \"\"\"Convert a pandas index or multiindex to an axis label.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return \"-\".join(map(to_utf8, index.names))\n else:\n return index.name\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 38, "name": "_index_to_ticklabels", "kind": "def", "category": "function", "info": "def _index_to_ticklabels(index):\n \"\"\"Convert a pandas index or multiindex into ticklabels.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return [\"-\".join(map(to_utf8, i)) for i in index.values]\n else:\n return index.values\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 46, "name": "_convert_colors", "kind": "def", "category": "function", "info": "def _convert_colors(colors):\n \"\"\"Convert either a list of colors or nested lists of colors to RGB.\"\"\"\n to_rgb = mpl.colors.to_rgb\n\n try:\n to_rgb(colors[0])\n # If this works, there is only one level of colors\n return list(map(to_rgb, colors))\n except ValueError:\n # If we get here, we have nested lists\n return [list(map(to_rgb, l)) for l in colors]\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 51, "name": "to_rgb", "kind": "ref", "category": "function", "info": " to_rgb(colors[0])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 59, "name": "_matrix_mask", "kind": "def", "category": "function", "info": "def _matrix_mask(data, mask):\n \"\"\"Ensure that data and mask are compatible and add missing values.\n\n Values will be plotted for cells where ``mask`` is ``False``.\n\n ``data`` is expected to be a DataFrame; ``mask`` can be an array or\n a DataFrame.\n\n \"\"\"\n if mask is None:\n mask = np.zeros(data.shape, bool)\n\n if isinstance(mask, np.ndarray):\n # 
For array masks, ensure that shape matches data then convert\n        if mask.shape != data.shape:\n            raise ValueError(\"Mask must have the same shape as data.\")\n\n        mask = pd.DataFrame(mask,\n                            index=data.index,\n                            columns=data.columns,\n                            dtype=bool)\n\n    elif isinstance(mask, pd.DataFrame):\n        # For DataFrame masks, ensure that semantic labels match data\n        if not mask.index.equals(data.index) \\\n           or not mask.columns.equals(data.columns):\n            err = \"Mask must have the same index and columns as data.\"\n            raise ValueError(err)\n\n    # Add any cells with missing data to the mask\n    # This works around an issue where `plt.pcolormesh` doesn't represent\n    # missing data properly\n    mask = mask | pd.isnull(data)\n\n    return mask\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 83, "name": "equals", "kind": "ref", "category": "function", "info": "        if not mask.index.equals(data.index) \\\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 84, "name": "equals", "kind": "ref", "category": "function", "info": "           or not mask.columns.equals(data.columns):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 96, "name": "_HeatMapper", "kind": "def", "category": "class", "info": "__init__\t_determine_cmap_params\t_annotate_heatmap\t_skip_ticks\t_auto_ticks\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 112, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": "        mask = _matrix_mask(data, mask)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 114, "name": "masked_where", "kind": "ref", "category": "function", "info": "        plot_data = np.ma.masked_where(np.asarray(mask), plot_data)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 120, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 122, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 129, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 131, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 140, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": "            self.xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 142, "name": "_skip_ticks", "kind": "ref", "category": 
"function", "info": " self.xticks, self.xticklabels = self._skip_ticks(xticklabels,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 150, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " self.yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 152, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " self.yticks, self.yticklabels = self._skip_ticks(yticklabels,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 156, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " xlabel = _index_to_label(data.columns)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 157, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " ylabel = _index_to_label(data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 162, "name": "_determine_cmap_params", "kind": "ref", "category": "function", "info": " self._determine_cmap_params(plot_data, vmin, vmax,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 191, "name": "_determine_cmap_params", "kind": "def", "category": "function", "info": " def _determine_cmap_params(self, plot_data, vmin, vmax,\n cmap, center, robust):\n \"\"\"Use some heuristics to set good defaults for colorbar and range.\"\"\"\n\n # plot_data is a np.ma.array instance\n calc_data = plot_data.astype(float).filled(np.nan)\n if vmin is None:\n if robust:\n vmin = np.nanpercentile(calc_data, 2)\n else:\n vmin = np.nanmin(calc_data)\n if vmax is None:\n if robust:\n vmax = np.nanpercentile(calc_data, 98)\n else:\n vmax = np.nanmax(calc_data)\n self.vmin, self.vmax = vmin, vmax\n\n # Choose default colormaps if not provided\n if cmap is None:\n if center is None:\n self.cmap = cm.rocket\n else:\n self.cmap = cm.icefire\n elif isinstance(cmap, str):\n self.cmap = get_colormap(cmap)\n elif isinstance(cmap, list):\n self.cmap = mpl.colors.ListedColormap(cmap)\n else:\n self.cmap = cmap\n\n # Recenter a divergent colormap\n if center is not None:\n\n # Copy bad values\n # in mpl<3.2 only masked values are honored with \"bad\" color spec\n # (see https://github.com/matplotlib/matplotlib/pull/14257)\n bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n\n # under/over values are set for sure when cmap extremes\n # do not map to the same color as +-inf\n under = self.cmap(-np.inf)\n over = self.cmap(np.inf)\n under_set = under != self.cmap(0)\n over_set = over != self.cmap(self.cmap.N - 1)\n\n vrange = max(vmax - center, center - vmin)\n normlize = mpl.colors.Normalize(center - vrange, center + vrange)\n cmin, cmax = normlize([vmin, vmax])\n cc = np.linspace(cmin, cmax, 256)\n self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n self.cmap.set_bad(bad)\n if under_set:\n self.cmap.set_under(under)\n if over_set:\n self.cmap.set_over(over)\n\n def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, 
np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 196, "name": "astype", 
"kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 196, "name": "filled", "kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 216, "name": "get_colormap", "kind": "ref", "category": "function", "info": " self.cmap = get_colormap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 218, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 228, "name": "cmap", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 228, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 232, "name": "cmap", "kind": "ref", "category": "function", "info": " under = self.cmap(-np.inf)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 233, "name": "cmap", "kind": "ref", "category": "function", "info": " over = self.cmap(np.inf)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 234, "name": "cmap", "kind": "ref", "category": "function", "info": " under_set = under != self.cmap(0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 235, "name": "cmap", "kind": "ref", "category": "function", "info": " over_set = over != self.cmap(self.cmap.N - 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 239, "name": "normlize", "kind": "ref", "category": "function", "info": " cmin, cmax = normlize([vmin, vmax])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "cmap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 242, "name": "set_bad", "kind": "ref", "category": "function", "info": " self.cmap.set_bad(bad)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 244, "name": "set_under", "kind": "ref", "category": "function", "info": " 
self.cmap.set_under(under)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 246, "name": "set_over", "kind": "ref", "category": "function", "info": " self.cmap.set_over(over)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 248, "name": "_annotate_heatmap", "kind": "def", "category": "function", "info": " def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n 
xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 257, "name": "relative_luminance", "kind": "ref", "category": "function", "info": " lum = relative_luminance(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 264, "name": "_skip_ticks", "kind": "def", "category": "function", "info": " def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly 
rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 277, "name": "_auto_ticks", "kind": "def", "category": "function", "info": " def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 279, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = ax.figure.dpi_scale_trans.inverted()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 280, "name": 
"transformed", "kind": "ref", "category": "function", "info": " bbox = ax.get_window_extent().transformed(transform)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 283, "name": "set_ticks", "kind": "ref", "category": "function", "info": " tick, = axis.set_ticks([0])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 284, "name": "get_size", "kind": "ref", "category": "function", "info": " fontsize = tick.label1.get_size()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 290, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " ticks, labels = self._skip_ticks(labels, tick_every)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 296, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 311, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 324, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 329, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 334, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(xticklabels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 335, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 339, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 341, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 343, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 351, "name": "_annotate_heatmap", "kind": "ref", "category": "function", "info": " self._annotate_heatmap(ax, mesh)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 354, 
"name": "heatmap", "kind": "def", "category": "function", "info": "def heatmap(\n data, *,\n vmin=None, vmax=None, cmap=None, center=None, robust=False,\n annot=None, fmt=\".2g\", annot_kws=None,\n linewidths=0, linecolor=\"white\",\n cbar=True, cbar_kws=None, cbar_ax=None,\n square=False, xticklabels=\"auto\", yticklabels=\"auto\",\n mask=None, ax=None,\n **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 542, "name": "_HeatMapper", "kind": "ref", "category": "function", "info": " plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 554, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect(\"equal\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 559, "name": "_DendrogramPlotter", "kind": "def", "category": "class", "info": "__init__\t_calculate_linkage_scipy\t_calculate_linkage_fastcluster\tcalculated_linkage\tcalculate_dendrogram\treordered_ind\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 594, "name": "calculate_dendrogram", "kind": "ref", "category": "function", "info": " self.dendrogram = self.calculate_dendrogram()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 600, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " ticklabels = _index_to_ticklabels(self.data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 608, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.ylabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 616, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.xlabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 625, "name": "_calculate_linkage_scipy", "kind": "def", "category": "function", "info": " def _calculate_linkage_scipy(self):\n linkage = hierarchy.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 630, "name": "_calculate_linkage_fastcluster", "kind": "def", "category": "function", "info": " def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except 
ImportError:\n            if np.product(self.shape) >= 10000:\n                msg = (\"Clustering large matrix with scipy. Installing \"\n                       \"`fastcluster` may give better performance.\")\n                warnings.warn(msg)\n\n        return self._calculate_linkage_scipy()\n\n    def calculate_dendrogram(self):\n        \"\"\"Calculates a dendrogram based on the linkage matrix\n\n        Made a separate function, not a property because we don't want to\n        recalculate the dendrogram every time it is accessed.\n\n        Returns\n        -------\n        dendrogram : dict\n            Dendrogram dictionary as returned by scipy.cluster.hierarchy\n            .dendrogram. The important key-value pairing is\n            \"reordered_ind\" which indicates the re-ordering of the matrix\n        \"\"\"\n        return hierarchy.dendrogram(self.linkage, no_plot=True,\n                                    color_threshold=-np.inf)\n\n    @property\n    def reordered_ind(self):\n        \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n        return self.dendrogram['leaves']\n\n    def plot(self, ax, tree_kws):\n        \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n        Parameters\n        ----------\n        ax : matplotlib.axes.Axes\n            Axes object upon which the dendrogram is plotted\n\n        \"\"\"\n        tree_kws = {} if tree_kws is None else tree_kws.copy()\n        tree_kws.setdefault(\"linewidths\", .5)\n        tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n        if self.rotate and self.axis == 0:\n            coords = zip(self.dependent_coord, self.independent_coord)\n        else:\n            coords = zip(self.independent_coord, self.dependent_coord)\n        lines = LineCollection([list(zip(x, y)) for x, y in coords],\n                               **tree_kws)\n\n        ax.add_collection(lines)\n        number_of_leaves = len(self.reordered_ind)\n        max_dependent_coord = max(map(max, self.dependent_coord))\n\n        if self.rotate:\n            ax.yaxis.set_ticks_position('right')\n\n            # Constants 10 and 1.05 come from\n            # `scipy.cluster.hierarchy._plot_dendrogram`\n            ax.set_ylim(0, number_of_leaves * 10)\n            ax.set_xlim(0, max_dependent_coord * 1.05)\n\n            ax.invert_xaxis()\n            ax.invert_yaxis()\n        else:\n            # Constants 10 and 1.05 come from\n            # `scipy.cluster.hierarchy._plot_dendrogram`\n            ax.set_xlim(0, number_of_leaves * 10)\n            ax.set_ylim(0, max_dependent_coord * 1.05)\n\n        despine(ax=ax, bottom=True, left=True)\n\n        ax.set(xticks=self.xticks, yticks=self.yticks,\n               xlabel=self.xlabel, ylabel=self.ylabel)\n        xtl = ax.set_xticklabels(self.xticklabels)\n        ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n        # Force a draw of the plot to avoid matplotlib window error\n        _draw_figure(ax.figure)\n\n        if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n            plt.setp(ytl, rotation=\"horizontal\")\n        if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n            plt.setp(xtl, rotation=\"vertical\")\n        return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 639, "name": "linkage_vector", "kind": "ref", "category": "function", "info": "            return fastcluster.linkage_vector(self.array,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 648, "name": "calculated_linkage", "kind": "def", "category": "function", "info": "    def calculated_linkage(self):\n\n        try:\n            return self._calculate_linkage_fastcluster()\n        except ImportError:\n            if np.product(self.shape) >= 10000:\n                msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 651, "name": "_calculate_linkage_fastcluster", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_fastcluster()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 658, "name": "_calculate_linkage_scipy", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_scipy()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 660, "name": "calculate_dendrogram", "kind": "def", "category": "function", "info": " def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n 
-------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 677, "name": "reordered_ind", "kind": "def", "category": "function", "info": " def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n 
ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 701, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 706, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 710, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, number_of_leaves * 10)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 711, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 713, "name": "invert_xaxis", "kind": "ref", "category": "function", "info": " ax.invert_xaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 714, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 718, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, number_of_leaves * 10)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 719, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 721, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, bottom=True, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 725, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(self.xticklabels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 726, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 729, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 731, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 733, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 783, "name": "_DendrogramPlotter", "kind": "ref", "category": "function", "info": " plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 792, "name": "ClusterGrid", "kind": "def", "category": "class", "info": "__init__\t_preprocess_colors\tformat_data\tz_score\tstandard_scale\tdim_ratios\tcolor_list_to_matrix_and_cmap\tplot_dendrograms\tplot_colors\tplot_matrix\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 806, "name": "format_data", "kind": "ref", "category": "function", "info": " self.data2d = self.format_data(self.data, pivot_kws, z_score,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 809, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": " self.mask = _matrix_mask(self.data2d, mask)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 814, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, row_colors, axis=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 816, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, col_colors, axis=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 828, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " width_ratios = self.dim_ratios(self.row_colors,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 831, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " height_ratios = self.dim_ratios(self.col_colors,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 842, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 843, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])\n"}, 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 844, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 845, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 851, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_colors = self._figure.add_subplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 854, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_colors = self._figure.add_subplot(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 857, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 863, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 870, "name": "_preprocess_colors", "kind": "def", "category": "function", "info": " def _preprocess_colors(self, data, colors, axis):\n \"\"\"Preprocess {row/col}_colors to extract labels and convert colors.\"\"\"\n labels = None\n\n if colors is not None:\n if isinstance(colors, (pd.DataFrame, pd.Series)):\n\n # If data is unindexed, raise\n if (not hasattr(data, \"index\") and axis == 0) or (\n not hasattr(data, \"columns\") and axis == 1\n ):\n axis_name = \"col\" if axis else \"row\"\n msg = (f\"{axis_name}_colors indices can't be matched with data \"\n f\"indices. Provide {axis_name}_colors as a non-indexed \"\n \"datatype, e.g. 
by using `.to_numpy()`\")\n                    raise TypeError(msg)\n\n                # Ensure colors match data indices\n                if axis == 0:\n                    colors = colors.reindex(data.index)\n                else:\n                    colors = colors.reindex(data.columns)\n\n                # Replace na's with white color\n                # TODO We should set these to transparent instead\n                colors = colors.astype(object).fillna('white')\n\n                # Extract color values and labels from frame/series\n                if isinstance(colors, pd.DataFrame):\n                    labels = list(colors.columns)\n                    colors = colors.T.values\n                else:\n                    if colors.name is None:\n                        labels = [\"\"]\n                    else:\n                        labels = [colors.name]\n                    colors = colors.values\n\n            colors = _convert_colors(colors)\n\n        return colors, labels\n\n    def format_data(self, data, pivot_kws, z_score=None,\n                    standard_scale=None):\n        \"\"\"Extract variables from data or use directly.\"\"\"\n\n        # Either the data is already in 2d matrix format, or need to do a pivot\n        if pivot_kws is not None:\n            data2d = data.pivot(**pivot_kws)\n        else:\n            data2d = data\n\n        if z_score is not None and standard_scale is not None:\n            raise ValueError(\n                'Cannot perform both z-scoring and standard-scaling on data')\n\n        if z_score is not None:\n            data2d = self.z_score(data2d, z_score)\n        if standard_scale is not None:\n            data2d = self.standard_scale(data2d, standard_scale)\n        return data2d\n\n    @staticmethod\n    def z_score(data2d, axis=1):\n        \"\"\"Standardize the mean and variance of the data axis\n\n        Parameters\n        ----------\n        data2d : pandas.DataFrame\n            Data to normalize\n        axis : int\n            Which axis to normalize across. If 0, normalize across rows, if 1,\n            normalize across columns.\n\n        Returns\n        -------\n        normalized : pandas.DataFrame\n            Normalized data with a mean of 0 and variance of 1 across the\n            specified axis.\n        \"\"\"\n        if axis == 1:\n            z_scored = data2d\n        else:\n            z_scored = data2d.T\n\n        z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n        if axis == 1:\n            return z_scored\n        else:\n            return z_scored.T\n\n    @staticmethod\n    def standard_scale(data2d, axis=1):\n        \"\"\"Divide the data by the difference between the max and min\n\n        Parameters\n        ----------\n        data2d : pandas.DataFrame\n            Data to normalize\n        axis : int\n            Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 889, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.index)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 891, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.columns)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 895, "name": "astype", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 895, "name": "fillna", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 908, "name": "_convert_colors", "kind": "ref", "category": "function", "info": " colors = _convert_colors(colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 912, "name": "format_data", "kind": "def", "category": "function", "info": " def format_data(self, data, pivot_kws, z_score=None,\n standard_scale=None):\n \"\"\"Extract variables from data or use directly.\"\"\"\n\n # Either the data is already in 2d matrix format, or need to do a pivot\n if pivot_kws is not None:\n data2d = data.pivot(**pivot_kws)\n else:\n data2d = data\n\n if z_score is not None and standard_scale is not None:\n raise ValueError(\n 'Cannot perform both z-scoring and standard-scaling on data')\n\n if z_score is not None:\n data2d = self.z_score(data2d, z_score)\n if standard_scale is not None:\n data2d = self.standard_scale(data2d, standard_scale)\n return data2d\n\n @staticmethod\n def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n 
self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, 
**kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 927, "name": "z_score", "kind": "ref", "category": "function", "info": " data2d = self.z_score(data2d, z_score)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 929, "name": "standard_scale", "kind": "ref", "category": "function", "info": " data2d = self.standard_scale(data2d, standard_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 933, "name": "z_score", "kind": "def", "category": "function", "info": " def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 963, "name": "standard_scale", "kind": "def", "category": "function", "info": " def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n 
matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", 
\"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 996, "name": "dim_ratios", "kind": "def", "category": "function", "info": " def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the 
original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n 
xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1015, "name": "color_list_to_matrix_and_cmap", "kind": "def", "category": "function", "info": " def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors 
will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get 
col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1039, "name": "to_rgb", "kind": "ref", "category": "function", "info": " mpl.colors.to_rgb(colors[0])\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1063, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(list(unique_colors))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1066, "name": "plot_dendrograms", "kind": "def", "category": "function", "info": " def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = 
np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1076, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_xticks([])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1077, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_yticks([])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1086, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_xticks([])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1087, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_yticks([])\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1088, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1089, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1091, "name": "plot_colors", "kind": "def", "category": "function", "info": " def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting 
ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1117, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1126, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1131, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1133, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_row_colors, left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1137, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1146, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n"}, 
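The `plot_matrix` records above show how `ClusterGrid` applies the dendrogram leaf order: row and column permutations come from `dendrogram_row.reordered_ind` / `dendrogram_col.reordered_ind` (falling back to `np.arange` when a side is not clustered), the data are reordered with `.iloc`, and a same-shaped `annot` array is reordered with two-step fancy indexing. A minimal sketch of that indexing pattern, with hypothetical `yind`/`xind` permutations standing in for the real leaf orders:

# Minimal sketch (not seaborn API): the reordering pattern used by
# ClusterGrid.plot_matrix. The example frame and the hand-written
# yind/xind permutations are hypothetical stand-ins for
# dendrogram_row.reordered_ind / dendrogram_col.reordered_ind.
import numpy as np
import pandas as pd

data2d = pd.DataFrame(np.arange(12).reshape(3, 4),
                      index=list("abc"), columns=list("wxyz"))
annot = np.asarray(data2d) * 10            # must match data2d.shape

yind = [2, 0, 1]                           # row leaf order (hypothetical)
xind = [3, 1, 0, 2]                        # column leaf order (hypothetical)

reordered = data2d.iloc[yind, xind]        # same .iloc call as plot_matrix
annot_reordered = annot[yind][:, xind]     # same two-step fancy indexing

assert annot_reordered.shape == reordered.shape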
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1151, "name": "tick_right", "kind": "ref", "category": "function", "info": " self.ax_col_colors.yaxis.tick_right()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1152, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1154, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_col_colors, left=True, bottom=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1156, "name": "plot_matrix", "kind": "def", "category": "function", "info": " def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = 
np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1189, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1193, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1194, "name": "get_rotation", "kind": "ref", "category": "function", "info": " ytl_rot = None if not ytl else ytl[0].get_rotation()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1195, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1196, "name": "set_label_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_label_position('right')\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1198, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1208, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1210, "name": "set_axis_on", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_on()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1211, "name": "set_position", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_position(self.cbar_pos)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1225, "name": "plot_dendrograms", "kind": "ref", "category": "function", "info": " self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1237, "name": "plot_colors", "kind": "ref", "category": "function", "info": " self.plot_colors(xind, yind, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1238, "name": "plot_matrix", "kind": "ref", "category": "function", "info": " self.plot_matrix(colorbar_kws, xind, yind, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1242, "name": "clustermap", "kind": "def", 
"category": "function", "info": "def clustermap(\n data, *,\n pivot_kws=None, method='average', metric='euclidean',\n z_score=None, standard_scale=None, figsize=(10, 10),\n cbar_kws=None, row_cluster=True, col_cluster=True,\n row_linkage=None, col_linkage=None,\n row_colors=None, col_colors=None, mask=None,\n dendrogram_ratio=.2, colors_ratio=0.03,\n cbar_pos=(.02, .8, .05, .18), tree_kws=None,\n **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1410, "name": "ClusterGrid", "kind": "ref", "category": "function", "info": " plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 8, "name": "palplot", "kind": "def", "category": "function", "info": "def palplot(pal, size=1):\n \"\"\"Plot the values in a color palette as a horizontal array.\n\n Parameters\n ----------\n pal : sequence of matplotlib colors\n colors, i.e. as returned by seaborn.color_palette()\n size :\n scaling factor for size of plot\n\n \"\"\"\n n = len(pal)\n f, ax = plt.subplots(1, 1, figsize=(n * size, size))\n ax.imshow(np.arange(n).reshape(1, n),\n cmap=mpl.colors.ListedColormap(list(pal)),\n interpolation=\"nearest\", aspect=\"auto\")\n ax.set_xticks(np.arange(n) - .5)\n ax.set_yticks([-.5, .5])\n # Ensure nice border between colors\n ax.set_xticklabels([\"\" for _ in range(n)])\n # The proper way to set no ticks\n ax.yaxis.set_major_locator(ticker.NullLocator())\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 22, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap=mpl.colors.ListedColormap(list(pal)),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 24, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(n) - .5)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 25, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks([-.5, .5])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 27, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels([\"\" for _ in range(n)])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 29, "name": "set_major_locator", "kind": "ref", "category": "function", "info": " ax.yaxis.set_major_locator(ticker.NullLocator())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 32, "name": "dogplot", "kind": "def", "category": "function", "info": "def dogplot(*_, **__):\n \"\"\"Who's a good boy?\"\"\"\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen\n from io import BytesIO\n\n url = \"https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png\"\n pic = np.random.randint(2, 7)\n data = BytesIO(urlopen(url.format(pic)).read())\n img = plt.imread(data)\n f, ax = plt.subplots(figsize=(5, 5), dpi=100)\n f.subplots_adjust(0, 0, 1, 1)\n ax.imshow(img)\n 
ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 41, "name": "randint", "kind": "ref", "category": "function", "info": " pic = np.random.randint(2, 7)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 47, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 60, "name": "_ColorPalette", "kind": "def", "category": "class", "info": "__enter__\t__exit__\tas_hex\t_repr_html_"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 62, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n \"\"\"Open the context.\"\"\"\n from .rcmod import set_palette\n self._orig_palette = color_palette()\n set_palette(self)\n return self\n\n def __exit__(self, *args):\n \"\"\"Close the context.\"\"\"\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 65, "name": "color_palette", "kind": "ref", "category": "function", "info": " self._orig_palette = color_palette()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 66, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 69, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, *args):\n \"\"\"Close the context.\"\"\"\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 72, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(self._orig_palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 74, "name": "as_hex", "kind": "def", "category": "function", "info": " def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the 
color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 76, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 77, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(hex)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 79, "name": "_repr_html_", "kind": "def", "category": "function", "info": " def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 84, "name": "as_hex", "kind": "ref", "category": "function", "info": " for i, c in enumerate(self.as_hex()):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 93, "name": "color_palette", "kind": "def", "category": "function", "info": "def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):\n \"\"\"Return a list of colors or continuous colormap defining a palette.\n\n Possible ``palette`` values include:\n - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)\n - Name of matplotlib colormap\n - 'husl' or 'hls'\n - 'ch:'\n - 'light:', 'dark:', 'blend:,',\n - A sequence of colors in any format matplotlib accepts\n\n Calling this function with ``palette=None`` will return the current\n matplotlib color cycle.\n\n This function can also be used in a ``with`` statement to temporarily\n set the color cycle for a plot or set of plots.\n\n See the :ref:`tutorial ` for more information.\n\n Parameters\n ----------\n palette : None, string, or sequence, optional\n Name of palette or None to return current palette. If a sequence, input\n colors are used but possibly cycled and desaturated.\n n_colors : int, optional\n Number of colors in the palette. If ``None``, the default will depend\n on how ``palette`` is specified. Named palettes default to 6 colors,\n but grabbing the current palette or passing in a list of colors will\n not change the number of colors unless this is specified. Asking for\n more colors than exist in the palette will cause it to cycle. Ignored\n when ``as_cmap`` is True.\n desat : float, optional\n Proportion to desaturate each color by.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n set_palette : Set the default color cycle for all plots.\n set_color_codes : Reassign color codes like ``\"b\"``, ``\"g\"``, etc. to\n colors from one of the seaborn palettes.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/color_palette.rst\n\n \"\"\"\n if palette is None:\n palette = get_color_cycle()\n if n_colors is None:\n n_colors = len(palette)\n\n elif not isinstance(palette, str):\n palette = palette\n if n_colors is None:\n n_colors = len(palette)\n else:\n\n if n_colors is None:\n # Use all colors in a qualitative palette or 6 of another kind\n n_colors = QUAL_PALETTE_SIZES.get(palette, 6)\n\n if palette in SEABORN_PALETTES:\n # Named \"seaborn variant\" of matplotlib default color cycle\n palette = SEABORN_PALETTES[palette]\n\n elif palette == \"hls\":\n # Evenly spaced colors in cylindrical RGB space\n palette = hls_palette(n_colors, as_cmap=as_cmap)\n\n elif palette == \"husl\":\n # Evenly spaced colors in cylindrical Lab space\n palette = husl_palette(n_colors, as_cmap=as_cmap)\n\n elif palette.lower() == \"jet\":\n # Paternalism\n raise ValueError(\"No.\")\n\n elif palette.startswith(\"ch:\"):\n # Cubehelix palette with params specified in string\n args, kwargs = _parse_cubehelix_args(palette)\n palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n\n elif palette.startswith(\"light:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"dark:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"blend:\"):\n # blend palette between colors specified in string\n _, colors = palette.split(\":\")\n colors = colors.split(\",\")\n palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n\n else:\n try:\n # Perhaps a named matplotlib colormap?\n palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n except (ValueError, KeyError): # Error class changed in mpl36\n raise ValueError(f\"{palette} is not a valid palette name\")\n\n if desat is not None:\n palette = [desaturate(c, desat) for c in palette]\n\n if not as_cmap:\n\n # Always return as many colors as we asked for\n pal_cycle = cycle(palette)\n palette = [next(pal_cycle) for _ in range(n_colors)]\n\n # Always return in r, g, b tuple format\n try:\n palette = map(mpl.colors.colorConverter.to_rgb, palette)\n palette = _ColorPalette(palette)\n except ValueError:\n raise ValueError(f\"Could not generate a palette for {palette}\")\n\n return palette\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 146, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " palette = get_color_cycle()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 166, "name": "hls_palette", "kind": "ref", "category": "function", "info": " palette = hls_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 170, "name": "husl_palette", "kind": "ref", "category": "function", "info": " palette = husl_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 178, "name": "_parse_cubehelix_args", 
"kind": "ref", "category": "function", "info": " args, kwargs = _parse_cubehelix_args(palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 179, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 187, "name": "light_palette", "kind": "ref", "category": "function", "info": " palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 195, "name": "dark_palette", "kind": "ref", "category": "function", "info": " palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 201, "name": "blend_palette", "kind": "ref", "category": "function", "info": " palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 206, "name": "mpl_palette", "kind": "ref", "category": "function", "info": " palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 211, "name": "desaturate", "kind": "ref", "category": "function", "info": " palette = [desaturate(c, desat) for c in palette]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 222, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " palette = _ColorPalette(palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 229, "name": "hls_palette", "kind": "def", "category": "function", "info": "def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa\n \"\"\"Get a set of evenly spaced colors in HLS hue space.\n\n h, l, and s should be between 0 and 1\n\n Parameters\n ----------\n\n n_colors : int\n number of colors in the palette\n h : float\n first hue\n l : float\n lightness\n s : float\n saturation\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n husl_palette : Make a palette using evenly spaced hues in the HUSL system.\n\n Examples\n --------\n\n Create a palette of 10 colors with the default parameters:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.hls_palette(10))\n\n Create a palette of 10 colors that begins at a different hue value:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.hls_palette(10, h=.5))\n\n Create a palette of 10 colors that are darker than the default:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.hls_palette(10, l=.4))\n\n Create a palette of 10 colors that are less saturated than the default:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.hls_palette(10, s=.4))\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues -= hues.astype(int)\n palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hls\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 292, "name": "astype", "kind": "ref", "category": "function", "info": " hues -= hues.astype(int)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 295, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hls\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 297, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 300, "name": "husl_palette", "kind": "def", "category": "function", "info": "def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa\n \"\"\"Get a set of evenly spaced colors in HUSL hue space.\n\n h, s, and l should be between 0 and 1\n\n Parameters\n ----------\n\n n_colors : int\n number of colors in the palette\n h : float\n first hue\n s : float\n saturation\n l : float\n lightness\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n hls_palette : Make a palette using evently spaced circular hues in the\n HSL system.\n\n Examples\n --------\n\n Create a palette of 10 colors with the default parameters:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.husl_palette(10))\n\n Create a palette of 10 colors that begins at a different hue value:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.husl_palette(10, h=.5))\n\n Create a palette of 10 colors that are darker than the default:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.husl_palette(10, l=.4))\n\n Create a palette of 10 colors that are less saturated than the default:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.husl_palette(10, s=.4))\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues *= 359\n s *= 99\n l *= 99 # noqa\n palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hsl\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 367, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 369, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hsl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 371, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 374, "name": "mpl_palette", "kind": "def", "category": "function", "info": "def mpl_palette(name, n_colors=6, as_cmap=False):\n \"\"\"Return discrete colors from a matplotlib palette.\n\n Note that this handles the qualitative colorbrewer palettes\n properly, although if you ask for more colors than a particular\n qualitative palette can provide you will get fewer than you are\n expecting. In contrast, asking for qualitative color brewer palettes\n using :func:`color_palette` will return the expected number of colors,\n but they will cycle.\n\n If you are using the IPython notebook, you can also use the function\n :func:`choose_colorbrewer_palette` to interactively select palettes.\n\n Parameters\n ----------\n name : string\n Name of the palette. This should be a named matplotlib colormap.\n n_colors : int\n Number of discrete colors in the palette.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n Examples\n --------\n\n Create a qualitative colorbrewer palette with 8 colors:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.mpl_palette(\"Set2\", 8))\n\n Create a sequential colorbrewer palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.mpl_palette(\"Blues\"))\n\n Create a diverging palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.mpl_palette(\"seismic\", 8))\n\n Create a \"dark\" sequential palette:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.mpl_palette(\"GnBu_d\"))\n\n \"\"\"\n if name.endswith(\"_d\"):\n sub_name = name[:-2]\n if sub_name.endswith(\"_r\"):\n reverse = True\n sub_name = sub_name[:-2]\n else:\n reverse = False\n pal = color_palette(sub_name, 2) + [\"#333333\"]\n if reverse:\n pal = pal[::-1]\n cmap = blend_palette(pal, n_colors, as_cmap=True)\n else:\n cmap = get_colormap(name)\n\n if name in MPL_QUAL_PALS:\n bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]\n else:\n bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]\n palette = list(map(tuple, cmap(bins)[:, :3]))\n\n if as_cmap:\n return cmap\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 438, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal = color_palette(sub_name, 2) + [\"#333333\"]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 441, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(pal, n_colors, as_cmap=True)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 443, "name": "get_colormap", "kind": "ref", "category": "function", "info": " cmap = get_colormap(name)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 449, "name": "cmap", "kind": "ref", "category": "function", "info": " palette = list(map(tuple, cmap(bins)[:, :3]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 454, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 457, "name": "_color_to_rgb", "kind": "def", "category": "function", "info": "def _color_to_rgb(color, input):\n \"\"\"Add some more flexibility to color choices.\"\"\"\n if input == \"hls\":\n color = colorsys.hls_to_rgb(*color)\n elif input == \"husl\":\n color = husl.husl_to_rgb(*color)\n color = tuple(np.clip(color, 0, 1))\n elif input == \"xkcd\":\n color = xkcd_rgb[color]\n\n return mpl.colors.to_rgb(color)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 462, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " color = husl.husl_to_rgb(*color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 467, "name": "to_rgb", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgb(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 470, "name": "dark_palette", "kind": "def", "category": "function", "info": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from dark to ``color``.\n\n This kind of palette is good for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a 
number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_dark_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex, rgb-tuple, or html color name\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n input : {'rgb', 'hls', 'husl', xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n\n Generate a palette from an HTML color:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.dark_palette(\"purple\"))\n\n Generate a palette that decreases in lightness:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.dark_palette(\"seagreen\", reverse=True))\n\n Generate a palette from an HUSL-space seed:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.dark_palette((260, 75, 60), input=\"husl\"))\n\n Generate a colormap object:\n\n .. plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.dark_palette(\"#2ecc71\", as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 15\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 543, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 544, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 546, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 548, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 551, "name": "light_palette", "kind": "def", "category": "function", "info": "def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from light to ``color``.\n\n This kind of palette is good for 
data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_light_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex code, html color name, or tuple in ``input`` space.\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n input : {'rgb', 'hls', 'husl', xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n\n Generate a palette from an HTML color:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.light_palette(\"purple\"))\n\n Generate a palette that increases in lightness:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.light_palette(\"seagreen\", reverse=True))\n\n Generate a palette from an HUSL-space seed:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.light_palette((260, 75, 60), input=\"husl\"))\n\n Generate a colormap object:\n\n .. 
plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.light_palette(\"#2ecc71\", as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 95\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 624, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 625, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 627, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 629, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 632, "name": "diverging_palette", "kind": "def", "category": "function", "info": "def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, # noqa\n center=\"light\", as_cmap=False):\n \"\"\"Make a diverging palette between two HUSL colors.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_diverging_palette` function.\n\n Parameters\n ----------\n h_neg, h_pos : float in [0, 359]\n Anchor hues for negative and positive extents of the map.\n s : float in [0, 100], optional\n Anchor saturation for both extents of the map.\n l : float in [0, 100], optional\n Anchor lightness for both extents of the map.\n sep : int, optional\n Size of the intermediate region.\n n : int, optional\n Number of colors in the palette (if not returning a cmap)\n center : {\"light\", \"dark\"}, optional\n Whether the center of the palette is light or dark\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark values.\n light_palette : Create a sequential palette with light values.\n\n Examples\n --------\n\n Generate a blue-white-red palette:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.diverging_palette(240, 10, n=9))\n\n Generate a brighter green-white-purple palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.diverging_palette(150, 275, s=80, l=55, n=9))\n\n Generate a blue-black-red palette:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.diverging_palette(250, 15, s=75, l=40,\n ... n=9, center=\"dark\"))\n\n Generate a colormap object:\n\n .. 
plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.diverging_palette(220, 20, as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n \"\"\"\n palfunc = dict(dark=dark_palette, light=light_palette)[center]\n n_half = int(128 - (sep // 2))\n neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center]\n mid = midpoint * sep\n pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 704, "name": "palfunc", "kind": "ref", "category": "function", "info": " neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 705, "name": "palfunc", "kind": "ref", "category": "function", "info": " pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 708, "name": "blend_palette", "kind": "ref", "category": "function", "info": " pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 712, "name": "blend_palette", "kind": "def", "category": "function", "info": "def blend_palette(colors, n_colors=6, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a palette that blends between a list of colors.\n\n Parameters\n ----------\n colors : sequence of colors in various formats interpreted by ``input``\n hex code, html color name, or tuple in ``input`` space.\n n_colors : int, optional\n Number of colors in the palette.\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n \"\"\"\n colors = [_color_to_rgb(color, input) for color in colors]\n name = \"blend\"\n pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n if not as_cmap:\n rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n pal = _ColorPalette(map(tuple, rgb_array))\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 729, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " colors = [_color_to_rgb(color, input) for color in colors]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 731, "name": "from_list", "kind": "ref", "category": "function", "info": " pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 733, "name": "pal", "kind": "ref", "category": "function", "info": " rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 734, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " pal = 
_ColorPalette(map(tuple, rgb_array))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 738, "name": "xkcd_palette", "kind": "def", "category": "function", "info": "def xkcd_palette(colors):\n \"\"\"Make a palette with color names from the xkcd color survey.\n\n See xkcd for the full list of colors: https://xkcd.com/color/rgb/\n\n This is just a simple wrapper around the ``seaborn.xkcd_rgb`` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the ``seaborn.xkcd_rgb`` dictionary.\n\n Returns\n -------\n palette : seaborn color palette\n Returns the list of colors as RGB tuples in an object that behaves like\n other seaborn color palettes.\n\n See Also\n --------\n crayon_palette : Make a palette with Crayola crayon colors.\n\n \"\"\"\n palette = [xkcd_rgb[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 762, "name": "color_palette", "kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 765, "name": "crayon_palette", "kind": "def", "category": "function", "info": "def crayon_palette(colors):\n \"\"\"Make a palette with color names from Crayola crayons.\n\n Colors are taken from here:\n https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors\n\n This is just a simple wrapper around the ``seaborn.crayons`` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the ``seaborn.crayons`` dictionary.\n\n Returns\n -------\n palette : seaborn color palette\n Returns the list of colors as rgb tuples in an object that behaves like\n other seaborn color palettes.\n\n See Also\n --------\n xkcd_palette : Make a palette with named colors from the XKCD color survey.\n\n \"\"\"\n palette = [crayons[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 790, "name": "color_palette", "kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 793, "name": "cubehelix_palette", "kind": "def", "category": "function", "info": "def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,\n light=.85, dark=.15, reverse=False, as_cmap=False):\n \"\"\"Make a sequential palette from the cubehelix system.\n\n This produces a colormap with linearly-decreasing (or increasing)\n brightness. That means that information will be preserved if printed to\n black and white or viewed by someone who is colorblind. 
\"cubehelix\" is\n also available as a matplotlib-based palette, but this function gives the\n user more control over the look of the palette and has a different set of\n defaults.\n\n In addition to using this function, it is also possible to generate a\n cubehelix palette generally in seaborn using a string-shorthand; see the\n example below.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n start : float, 0 <= start <= 3\n The hue at the start of the helix.\n rot : float\n Rotations around the hue wheel over the range of the palette.\n gamma : float 0 <= gamma\n Gamma factor to emphasize darker (gamma < 1) or lighter (gamma > 1)\n colors.\n hue : float, 0 <= hue <= 1\n Saturation of the colors.\n dark : float 0 <= dark <= 1\n Intensity of the darkest color in the palette.\n light : float 0 <= light <= 1\n Intensity of the lightest color in the palette.\n reverse : bool\n If True, the palette will go from dark to light.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.Colormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.Colormap`\n\n See Also\n --------\n choose_cubehelix_palette : Launch an interactive widget to select cubehelix\n palette parameters.\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n\n References\n ----------\n Green, D. A. (2011). \"A colour scheme for the display of astronomical\n intensity images\". Bulletin of the Astromical Society of India, Vol. 39,\n p. 289-295.\n\n Examples\n --------\n\n Generate the default palette:\n\n .. plot::\n :context: close-figs\n\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.palplot(sns.cubehelix_palette())\n\n Rotate backwards from the same starting location:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.cubehelix_palette(rot=-.4))\n\n Use a different starting point and shorter rotation:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.cubehelix_palette(start=2.8, rot=.1))\n\n Reverse the direction of the lightness ramp:\n\n .. plot::\n :context: close-figs\n\n >>> sns.palplot(sns.cubehelix_palette(reverse=True))\n\n Generate a colormap object:\n\n .. plot::\n :context: close-figs\n\n >>> from numpy import arange\n >>> x = arange(25).reshape(5, 5)\n >>> cmap = sns.cubehelix_palette(as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n Use the full lightness range:\n\n .. plot::\n :context: close-figs\n\n >>> cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True)\n >>> ax = sns.heatmap(x, cmap=cmap)\n\n Use through the :func:`color_palette` interface:\n\n .. 
plot::\n :context: close-figs\n\n >>> sns.palplot(sns.color_palette(\"ch:2,r=.2,l=.6\"))\n\n \"\"\"\n def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 905, "name": "get_color_function", "kind": "def", "category": "function", "info": " def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 907, "name": "color", "kind": "def", "category": "function", "info": " def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, 
\"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 922, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"red\": get_color_function(-0.14861, 1.78277),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 923, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"green\": get_color_function(-0.29227, -0.90649),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 924, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"blue\": get_color_function(1.97294, 0.0),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 927, "name": "LinearSegmentedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 930, "name": "cmap", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 930, "name": "tolist", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 938, "name": "cmap", "kind": "ref", "category": "function", "info": " pal_256 = cmap(x_256)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 939, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 942, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 945, "name": "_parse_cubehelix_args", "kind": "def", "category": "function", "info": "def _parse_cubehelix_args(argstr):\n \"\"\"Turn stringified cubehelix params into args/kwargs.\"\"\"\n\n if argstr.startswith(\"ch:\"):\n argstr = argstr[3:]\n\n if argstr.endswith(\"_r\"):\n reverse = True\n argstr = argstr[:-2]\n else:\n reverse = False\n\n if not argstr:\n return [], {\"reverse\": reverse}\n\n all_args = argstr.split(\",\")\n\n args = [float(a.strip(\" \")) for a in all_args if \"=\" not in a]\n\n kwargs = [a.split(\"=\") for a in all_args if \"=\" in a]\n kwargs = {k.strip(\" \"): float(v.strip(\" \")) for k, v in kwargs}\n\n kwarg_map = dict(\n s=\"start\", r=\"rot\", g=\"gamma\",\n h=\"hue\", l=\"light\", d=\"dark\", # noqa: E741\n )\n\n kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}\n\n if reverse:\n kwargs[\"reverse\"] = True\n\n return args, kwargs\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": 
"seaborn/palettes.py", "line": 980, "name": "set_color_codes", "kind": "def", "category": "function", "info": "def set_color_codes(palette=\"deep\"):\n \"\"\"Change how matplotlib color shorthands are interpreted.\n\n Calling this will change how shorthand codes like \"b\" or \"g\"\n are interpreted by matplotlib in subsequent plots.\n\n Parameters\n ----------\n palette : {deep, muted, pastel, dark, bright, colorblind}\n Named seaborn palette to use as the source of colors.\n\n See Also\n --------\n set : Color codes can be set through the high-level seaborn style\n manager.\n set_palette : Color codes can also be set through the function that\n sets the matplotlib color cycle.\n\n Examples\n --------\n\n Map matplotlib color codes to the default seaborn palette.\n\n .. plot::\n :context: close-figs\n\n >>> import matplotlib.pyplot as plt\n >>> import seaborn as sns; sns.set_theme()\n >>> sns.set_color_codes()\n >>> _ = plt.plot([0, 1], color=\"r\")\n\n Use a different seaborn palette.\n\n .. plot::\n :context: close-figs\n\n >>> sns.set_color_codes(\"dark\")\n >>> _ = plt.plot([0, 1], color=\"g\")\n >>> _ = plt.plot([0, 2], color=\"m\")\n\n \"\"\"\n if palette == \"reset\":\n colors = [(0., 0., 1.), (0., .5, 0.), (1., 0., 0.), (.75, 0., .75),\n (.75, .75, 0.), (0., .75, .75), (0., 0., 0.)]\n elif not isinstance(palette, str):\n err = \"set_color_codes requires a named seaborn palette\"\n raise TypeError(err)\n elif palette in SEABORN_PALETTES:\n if not palette.endswith(\"6\"):\n palette = palette + \"6\"\n colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]\n else:\n err = f\"Cannot set colors with palette '{palette}'\"\n raise ValueError(err)\n\n for code, color in zip(\"bgrmyck\", colors):\n rgb = mpl.colors.colorConverter.to_rgb(color)\n mpl.colors.colorConverter.colors[code] = rgb\n mpl.colors.colorConverter.cache[code] = rgb\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 1036, "name": "to_rgb", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 81, "name": "set_theme", "kind": "def", "category": "function", "info": "def set_theme(context=\"notebook\", style=\"darkgrid\", palette=\"deep\",\n font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):\n \"\"\"\n Set aspects of the visual theme for all matplotlib and seaborn plots.\n\n This function changes the global defaults for all plots using the\n matplotlib rcParams system. The themeing is decomposed into several distinct\n sets of parameter values.\n\n The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>`\n and :doc:`color palette <../tutorial/color_palettes>` tutorials.\n\n Parameters\n ----------\n context : string or dict\n Scaling parameters, see :func:`plotting_context`.\n style : string or dict\n Axes style parameters, see :func:`axes_style`.\n palette : string or sequence\n Color palette, see :func:`color_palette`.\n font : string\n Font family, see matplotlib font manager.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) 
to the colors from this palette.\n rc : dict or None\n Dictionary of rc parameter mappings to override the above.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_theme.rst\n\n \"\"\"\n set_context(context, font_scale)\n set_style(style, rc={\"font.family\": font})\n set_palette(palette, color_codes=color_codes)\n if rc is not None:\n mpl.rcParams.update(rc)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 118, "name": "set_context", "kind": "ref", "category": "function", "info": " set_context(context, font_scale)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 119, "name": "set_style", "kind": "ref", "category": "function", "info": " set_style(style, rc={\"font.family\": font})\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 120, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(palette, color_codes=color_codes)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 131, "name": "set_theme", "kind": "ref", "category": "function", "info": " set_theme(*args, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 134, "name": "reset_defaults", "kind": "def", "category": "function", "info": "def reset_defaults():\n \"\"\"Restore all RC params to default settings.\"\"\"\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 139, "name": "reset_orig", "kind": "def", "category": "function", "info": "def reset_orig():\n \"\"\"Restore all RC params to original settings (respects custom rc).\"\"\"\n from . import _orig_rc_params\n mpl.rcParams.update(_orig_rc_params)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 145, "name": "axes_style", "kind": "def", "category": "function", "info": "def axes_style(style=None, rc=None):\n \"\"\"\n Get the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_style`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n style : None, dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/axes_style.rst\n\n \"\"\"\n if style is None:\n style_dict = {k: mpl.rcParams[k] for k in _style_keys}\n\n elif isinstance(style, dict):\n style_dict = style\n\n else:\n styles = [\"white\", \"dark\", \"whitegrid\", \"darkgrid\", \"ticks\"]\n if style not in styles:\n raise ValueError(f\"style must be one of {', '.join(styles)}\")\n\n # Define colors here\n dark_gray = \".15\"\n light_gray = \".8\"\n\n # Common parameters\n style_dict = {\n\n \"figure.facecolor\": \"white\",\n \"axes.labelcolor\": dark_gray,\n\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": dark_gray,\n \"ytick.color\": dark_gray,\n\n \"axes.axisbelow\": True,\n \"grid.linestyle\": \"-\",\n\n\n \"text.color\": dark_gray,\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\"Arial\", \"DejaVu Sans\", \"Liberation Sans\",\n \"Bitstream Vera Sans\", \"sans-serif\"],\n\n\n \"lines.solid_capstyle\": \"round\",\n \"patch.edgecolor\": \"w\",\n \"patch.force_edgecolor\": True,\n\n \"image.cmap\": \"rocket\",\n\n \"xtick.top\": False,\n \"ytick.right\": False,\n\n }\n\n # Set grid on or off\n if \"grid\" in style:\n style_dict.update({\n \"axes.grid\": True,\n })\n else:\n style_dict.update({\n \"axes.grid\": False,\n })\n\n # Set the color of the background, spines, and grids\n if style.startswith(\"dark\"):\n style_dict.update({\n\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"grid.color\": \"white\",\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style == \"whitegrid\":\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": light_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style in [\"white\", \"ticks\"]:\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": dark_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n # Show or hide the axes ticks\n if style == \"ticks\":\n style_dict.update({\n \"xtick.bottom\": True,\n \"ytick.left\": True,\n })\n else:\n style_dict.update({\n \"xtick.bottom\": False,\n \"ytick.left\": False,\n })\n\n # Remove entries that are not defined in the base list of valid keys\n # This lets us handle matplotlib <=/> 2.0\n style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _style_keys}\n style_dict.update(rc)\n\n # Wrap in an _AxesStyle object so this can be used in a with statement\n style_object = _AxesStyle(style_dict)\n\n return style_object\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 297, "name": "_AxesStyle", "kind": "ref", "category": "function", "info": " style_object = _AxesStyle(style_dict)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 302, "name": "set_style", "kind": "def", "category": "function", "info": "def set_style(style=None, rc=None):\n \"\"\"\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n 
whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n See :func:`axes_style` to get the parameter values.\n\n Parameters\n ----------\n style : dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_style.rst\n\n \"\"\"\n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 330, "name": "axes_style", "kind": "ref", "category": "function", "info": " style_object = axes_style(style, rc)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 334, "name": "plotting_context", "kind": "def", "category": "function", "info": "def plotting_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Get the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_context`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n context : None, dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/plotting_context.rst\n\n \"\"\"\n if context is None:\n context_dict = {k: mpl.rcParams[k] for k in _context_keys}\n\n elif isinstance(context, dict):\n context_dict = context\n\n else:\n\n contexts = [\"paper\", \"notebook\", \"talk\", \"poster\"]\n if context not in contexts:\n raise ValueError(f\"context must be in {', '.join(contexts)}\")\n\n # Set up dictionary of default parameters\n texts_base_context = {\n\n \"font.size\": 12,\n \"axes.labelsize\": 12,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 11,\n \"ytick.labelsize\": 11,\n \"legend.fontsize\": 11,\n \"legend.title_fontsize\": 12,\n\n }\n\n base_context = {\n\n \"axes.linewidth\": 1.25,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.5,\n \"lines.markersize\": 6,\n \"patch.linewidth\": 1,\n\n \"xtick.major.width\": 1.25,\n \"ytick.major.width\": 1.25,\n \"xtick.minor.width\": 1,\n \"ytick.minor.width\": 1,\n\n \"xtick.major.size\": 6,\n \"ytick.major.size\": 6,\n \"xtick.minor.size\": 4,\n \"ytick.minor.size\": 4,\n\n }\n base_context.update(texts_base_context)\n\n # Scale all the parameters by the same factor depending on the context\n scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]\n context_dict = {k: v * scaling for k, v in base_context.items()}\n\n # Now independently scale the fonts\n font_keys = texts_base_context.keys()\n font_dict = {k: context_dict[k] * font_scale for k in font_keys}\n context_dict.update(font_dict)\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _context_keys}\n context_dict.update(rc)\n\n # Wrap in a _PlottingContext object so this can be used in a with statement\n context_object = _PlottingContext(context_dict)\n\n return context_object\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 430, "name": "_PlottingContext", "kind": "ref", "category": "function", "info": " context_object = _PlottingContext(context_dict)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 435, "name": "set_context", "kind": "def", "category": "function", "info": "def set_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Set the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n See :func:`plotting_context` to get the parameter values.\n\n Parameters\n ----------\n context : dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/set_context.rst\n\n \"\"\"\n context_object = plotting_context(context, font_scale, rc)\n mpl.rcParams.update(context_object)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 468, "name": "plotting_context", "kind": "ref", "category": "function", "info": " context_object = plotting_context(context, font_scale, rc)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 472, "name": "_RCAesthetics", "kind": "def", "category": "class", "info": "__enter__\t__exit__\t__call__"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 473, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n rc = mpl.rcParams\n self._orig = {k: rc[k] for k in self._keys}\n self._set(self)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 476, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 478, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 479, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self._orig)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 483, "name": "wrapper", "kind": "def", "category": "function", "info": " def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 485, "name": "func", "kind": "ref", "category": "function", "info": " return func(*args, **kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 489, "name": "_AxesStyle", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 495, "name": "_PlottingContext", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 501, "name": "set_palette", "kind": "def", "category": "function", "info": "def set_palette(palette, n_colors=None, desat=None, color_codes=False):\n \"\"\"Set the matplotlib color cycle using a seaborn palette.\n\n Parameters\n ----------\n palette : seaborn color palette | matplotlib colormap | hls | husl\n Palette definition. 
Should be something :func:`color_palette` can process.\n n_colors : int\n Number of colors in the cycle. The default number of colors will depend\n on the format of ``palette``, see the :func:`color_palette`\n documentation for more information.\n desat : float\n Proportion to desaturate each color by.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n\n Examples\n --------\n >>> set_palette(\"Reds\")\n\n >>> set_palette(\"Set1\", 8, .75)\n\n See Also\n --------\n color_palette : build a color palette or set the color cycle temporarily\n in a ``with`` statement.\n set_context : set parameters to scale plot elements\n set_style : set the default parameters for figure style\n\n \"\"\"\n colors = palettes.color_palette(palette, n_colors, desat)\n cyl = cycler('color', colors)\n mpl.rcParams['axes.prop_cycle'] = cyl\n if color_codes:\n try:\n palettes.set_color_codes(palette)\n except (ValueError, TypeError):\n pass\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 532, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = palettes.color_palette(palette, n_colors, desat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 537, "name": "set_color_codes", "kind": "ref", "category": "function", "info": " palettes.set_color_codes(palette)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 24, "name": "_LinearPlotter", "kind": "def", "category": "class", "info": "establish_variables\tdropna\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 31, "name": "establish_variables", "kind": "def", "category": "function", "info": " def establish_variables(self, data, **kws):\n \"\"\"Extract variables from data or use directly.\"\"\"\n self.data = data\n\n # Validate the inputs\n any_strings = any([isinstance(v, str) for v in kws.values()])\n if any_strings and data is None:\n raise ValueError(\"Must pass `data` if using named variables.\")\n\n # Set the variables\n for var, val in kws.items():\n if isinstance(val, str):\n vector = data[val]\n elif isinstance(val, list):\n vector = np.asarray(val)\n else:\n vector = val\n if vector is not None and vector.shape != (1,):\n vector = np.squeeze(vector)\n if np.ndim(vector) > 1:\n err = \"regplot inputs must be 1d\"\n raise ValueError(err)\n setattr(self, var, vector)\n\n def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 55, "name": "dropna", "kind": "def", "category": "function", "info": " def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = 
np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 69, "name": "_RegressionPlotter", "kind": "def", "category": "class", "info": "__init__\tscatter_data\testimate_data\tfit_regression\tfit_fast\tfit_poly\tfit_statsmodels\tfit_lowess\tfit_logx\tbin_predictor\tregress_out\tplot\tscatterplot\tlineplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 106, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(data, x=x, y=y, units=units,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 111, "name": "dropna", "kind": "ref", "category": "function", "info": " self.dropna(\"x\", \"y\", \"units\", \"x_partial\", \"y_partial\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 115, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.x = self.regress_out(self.x, self.x_partial)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 117, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.y = self.regress_out(self.y, self.y_partial)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 122, "name": "bin_predictor", "kind": "ref", "category": "function", "info": " x_discrete, x_bins = self.bin_predictor(x_bins)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 136, "name": "scatter_data", "kind": "def", "category": "function", "info": " def scatter_data(self):\n \"\"\"Data where each observation is a point.\"\"\"\n x_j = self.x_jitter\n if x_j is None:\n x = self.x\n else:\n x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n\n y_j = self.y_jitter\n if y_j is None:\n y = self.y\n else:\n y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n\n return x, y\n\n @property\n def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is 
None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = 
np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 142, "name": "uniform", "kind": "ref", "category": "function", "info": " x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 148, "name": "uniform", "kind": "ref", "category": "function", "info": " y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 153, "name": "estimate_data", "kind": "def", "category": "function", "info": " def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = 
np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n 
kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 163, "name": "x_estimator", "kind": "ref", "category": "function", "info": " est = self.x_estimator(_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 177, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = algo.bootstrap(_y,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 182, "name": "ci", "kind": "ref", "category": "function", "info": " _ci = utils.ci(boots, self.x_ci)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 187, "name": "fit_regression", "kind": "def", "category": "function", "info": " def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), 
grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n 
self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 197, "name": "get_xlim", "kind": "ref", "category": "function", "info": " x_min, x_max = ax.get_xlim()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 203, "name": "fit_poly", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_poly(grid, self.order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 207, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 211, "name": "fit_lowess", "kind": "ref", "category": "function", "info": " grid, yhat = self.fit_lowess()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 214, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 216, "name": "fit_logx", "kind": 
"ref", "category": "function", "info": " yhat, yhat_boots = self.fit_logx(grid)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 218, "name": "fit_fast", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_fast(grid)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 224, "name": "ci", "kind": "ref", "category": "function", "info": " err_bands = utils.ci(yhat_boots, ci, axis=0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 228, "name": "fit_fast", "kind": "def", "category": "function", "info": " def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n 
return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 230, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, 
None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, 
line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 231, "name": "pinv", "kind": "ref", "category": "function", "info": " return np.linalg.pinv(_x).dot(_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 235, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = grid.dot(reg_func(X, y))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 239, "name": "bootstrap", "kind": "ref", "category": "function", "info": " beta_boots = algo.bootstrap(X, y,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 247, "name": "fit_poly", "kind": "def", "category": "function", "info": " def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n 
X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n 
kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 249, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n 
func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not 
None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 253, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = reg_func(x, y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 257, "name": "bootstrap", "kind": "ref", "category": "function", "info": " yhat_boots = algo.bootstrap(x, y,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 264, "name": "fit_statsmodels", "kind": "def", "category": "function", "info": " def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", 
color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 270, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, 
**kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = 
self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 272, "name": "model", "kind": "ref", "category": "function", "info": " yhat = model(_y, _x, **kwargs).fit().predict(grid)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 278, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = reg_func(X, y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 282, "name": "bootstrap", "kind": "ref", "category": "function", "info": " yhat_boots = algo.bootstrap(X, y,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 289, "name": "fit_lowess", "kind": "def", "category": "function", "info": " def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot 
label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 295, "name": "fit_logx", "kind": "def", "category": "function", "info": " def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning 
value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", 
"line": 300, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = 
mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 302, "name": "pinv", "kind": "ref", "category": "function", "info": " return np.linalg.pinv(_x).dot(_y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 304, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = grid.dot(reg_func(X, y))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 308, "name": "bootstrap", "kind": "ref", "category": "function", "info": " beta_boots = algo.bootstrap(X, y,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 316, "name": "bin_predictor", "kind": "def", "category": "function", "info": " def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = 
np.ravel(bins)

        dist = np.abs(np.subtract.outer(x, bins))
        x_binned = bins[np.argmin(dist, axis=1)].ravel()

        return x_binned, bins

# --- seaborn/regression.py: _RegressionPlotter (continued) ---

    def regress_out(self, a, b):   # line 330
        """Regress b from a, keeping a's original mean."""
        a_mean = a.mean()
        a = a - a_mean
        b = b - b.mean()
        b = np.c_[b]
        a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
        return np.asarray(a_prime + a_mean).reshape(a.shape)

    def plot(self, ax, scatter_kws, line_kws):
        """Draw the full plot."""
        # Insert the plot label into the correct set of keyword arguments
        if self.scatter:
            scatter_kws["label"] = self.label
        else:
            line_kws["label"] = self.label

        # Use the current color cycle state as a default
        if self.color is None:
            lines, = ax.plot([], [])
            color = lines.get_color()
            lines.remove()
        else:
            color = self.color

        # Ensure that color is hex to avoid matplotlib weirdness
        color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))

        # Let color in keyword arguments override overall plot color
        scatter_kws.setdefault("color", color)
        line_kws.setdefault("color", color)

        # Draw the constituent plots
        if self.scatter:
            self.scatterplot(ax, scatter_kws)

        if self.fit_reg:
            self.lineplot(ax, line_kws)

        # Label the axes
        if hasattr(self.x, "name"):
            ax.set_xlabel(self.x.name)
        if hasattr(self.y, "name"):
            ax.set_ylabel(self.y.name)

    def scatterplot(self, ax, kws):   # line 375
        """Draw the data."""
        # Treat the line-based markers specially, explicitly setting larger
        # linewidth than is provided by the seaborn style defaults.
        # This would ideally be handled better in matplotlib (i.e., distinguish
        # between edgewidth for solid glyphs and linewidth for line glyphs),
        # but this should do for now.
        line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
        if self.x_estimator is None:
            if "marker" in kws and kws["marker"] in line_markers:
                lw = mpl.rcParams["lines.linewidth"]
            else:
                lw = mpl.rcParams["lines.markeredgewidth"]
            kws.setdefault("linewidths", lw)

            if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
                kws.setdefault("alpha", .8)

            x, y = self.scatter_data
            ax.scatter(x, y, **kws)
        else:
            # TODO abstraction
            ci_kws = {"color": kws["color"]}
            if "alpha" in kws:
                ci_kws["alpha"] = kws["alpha"]
            ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
            kws.setdefault("s", 50)

            xs, ys, cis = self.estimate_data
            if [ci for ci in cis if ci is not None]:
                for x, ci in zip(xs, cis):
                    ax.plot([x, x], ci, **ci_kws)
            ax.scatter(xs, ys, **kws)

    def lineplot(self, ax, kws):   # line 409
        """Draw the model."""
        # Fit the regression model
        grid, yhat, err_bands = self.fit_regression(ax)
        edges = grid[0], grid[-1]

        # Set default aesthetics
        fill_color = kws["color"]
        lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
        kws.setdefault("linewidth", lw)

        # Draw the regression line and confidence interval
        line, = ax.plot(grid, yhat, **kws)
        if not self.truncate:
            line.sticky_edges.x[:] = edges  # Prevent mpl from adding margin
        if err_bands is not None:
            ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)

# refs: np.linalg.pinv (336), get_color (350), rgb2hex / to_rgb (356),
# self.scatterplot (364), self.lineplot (367), set_xlabel (371),
# set_ylabel (373), fit_regression (412)


def lmplot(             # line 559
    data=None, *,
    x=None, y=None, hue=None, col=None, row=None,
    palette=None, col_wrap=None, height=5, aspect=1, markers="o",
    sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,
    legend=True, legend_out=None, x_estimator=None, x_bins=None,
    x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
    units=None, seed=None, order=1, logistic=False, lowess=False,
    robust=False, logx=False, x_partial=None, y_partial=None,
    truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,
    line_kws=None, facet_kws=None,
):
    # ... (a few lines not captured in the index) ...

    def facet_kw_deprecation(key, val):   # line 575
        msg = (
            f"{key} is deprecated from the `lmplot` function signature. "
            "Please update your code to pass it using `facet_kws`."
        )
        if val is not None:
            warnings.warn(msg, UserWarning)
            facet_kws[key] = val

    facet_kw_deprecation("sharex", sharex)
    facet_kw_deprecation("sharey", sharey)
    facet_kw_deprecation("legend_out", legend_out)

    if data is None:
        raise TypeError("Missing required keyword argument `data`.")

    # Reduce the dataframe to only needed columns
    need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
    cols = np.unique([a for a in need_cols if a is not None]).tolist()
    data = data[cols]

    # Initialize the grid
    facets = FacetGrid(
        data, row=row, col=col, hue=hue,
        palette=palette,
        row_order=row_order, col_order=col_order, hue_order=hue_order,
        height=height, aspect=aspect, col_wrap=col_wrap,
        **facet_kws,
    )

    # Add the markers here as FacetGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if facets.hue_names is None:
        n_markers = 1
    else:
        n_markers = len(facets.hue_names)
    if not isinstance(markers, list):
        markers = [markers] * n_markers
    if len(markers) != n_markers:
        raise ValueError("markers must be a singleton or a list of markers "
                         "for each level of the hue variable")
    facets.hue_kws = {"marker": markers}

    def update_datalim(data, x, y, ax, **kws):   # line 618
        xys = data[[x, y]].to_numpy().astype(float)
        ax.update_datalim(xys, updatey=False)
        ax.autoscale_view(scaley=False)

    facets.map_dataframe(update_datalim, x=x, y=y)

    # Draw the regression plot on each facet
    regplot_kws = dict(
        x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,
        scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,
        seed=seed, order=order, logistic=logistic, lowess=lowess,
        robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,
        truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,
        scatter_kws=scatter_kws, line_kws=line_kws,
    )
    facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)
    facets.set_axis_labels(x, y)

    # Add a legend
    if legend and (hue is not None) and (hue not in [col, row]):
        facets.add_legend()
    return facets

# refs: facet_kw_deprecation (584-586), tolist (593), FacetGrid (597)
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 618, "name": "update_datalim", "kind": "def", "category": "function", "info": " def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in [col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 619, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 619, "name": "astype", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 620, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xys, updatey=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 621, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scaley=False)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 623, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(update_datalim, x=x, y=y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 634, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 635, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " facets.set_axis_labels(x, y)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 639, "name": "add_legend", "kind": "ref", "category": "function", "info": " facets.add_legend()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 826, "name": "regplot", "kind": "def", "category": "function", "info": "def regplot(\n data=None, *, x=None, y=None,\n x_estimator=None, x_bins=None, x_ci=\"ci\",\n scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,\n seed=None, order=1, logistic=False, lowess=False, 
robust=False,\n logx=False, x_partial=None, y_partial=None,\n truncate=True, dropna=True, x_jitter=None, y_jitter=None,\n label=None, color=None, marker=\"o\",\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 837, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 1022, "name": "residplot", "kind": "def", "category": "function", "info": "def residplot(\n data=None, *, x=None, y=None,\n x_partial=None, y_partial=None, lowess=False,\n order=1, robust=False, dropna=True, label=None, color=None,\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 1078, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, ci=None,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 1087, "name": "fit_regression", "kind": "ref", "category": "function", "info": " _, yhat, _ = plotter.fit_regression(grid=plotter.x)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 24, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": "_relational_narrative = DocstringComponents(dict(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 175, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 177, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 178, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " rel=DocstringComponents(_relational_docs),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 179, "name": "from_function_params", "kind": "ref", "category": "function", "info": " stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 183, "name": "_RelationalPlotter", "kind": "def", "category": "class", "info": "add_legend_data"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 192, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax):\n \"\"\"Add labeled artists to represent the different plot semantics.\"\"\"\n verbosity = self.legend\n if isinstance(verbosity, str) and verbosity not in [\"auto\", \"brief\", \"full\"]:\n err 
= \"`legend` must be 'auto', 'brief', 'full', or a boolean.\"\n raise ValueError(err)\n elif verbosity is True:\n verbosity = \"auto\"\n\n legend_kwargs = {}\n keys = []\n\n # Assign a legend title if there is only going to be one sub-legend,\n # otherwise, subtitles will be inserted into the texts list with an\n # invisible handle (which is a hack)\n titles = {\n title for title in\n (self.variables.get(v, None) for v in [\"hue\", \"size\", \"style\"])\n if title is not None\n }\n if len(titles) == 1:\n legend_title = titles.pop()\n else:\n legend_title = \"\"\n\n title_kws = dict(\n visible=False, color=\"w\", s=0, linewidth=0, marker=\"\", dashes=\"\"\n )\n\n def update(var_name, val_name, **kws):\n\n key = var_name, val_name\n if key in legend_kwargs:\n legend_kwargs[key].update(**kws)\n else:\n keys.append(key)\n\n legend_kwargs[key] = dict(**kws)\n\n # Define the maximum number of ticks to use for \"brief\" legends\n brief_ticks = 6\n\n # -- Add a legend for hue semantics\n brief_hue = self._hue_map.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(self._hue_map.levels) > brief_ticks)\n )\n if brief_hue:\n if isinstance(self._hue_map.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n limits = min(self._hue_map.levels), max(self._hue_map.levels)\n hue_levels, hue_formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[\"hue\"].infer_objects().dtype\n )\n elif self._hue_map.levels is None:\n hue_levels = hue_formatted_levels = []\n else:\n hue_levels = hue_formatted_levels = self._hue_map.levels\n\n # Add the hue semantic subtitle\n if not legend_title and self.variables.get(\"hue\", None) is not None:\n update((self.variables[\"hue\"], \"title\"),\n self.variables[\"hue\"], **title_kws)\n\n # Add the hue semantic labels\n for level, formatted_level in zip(hue_levels, hue_formatted_levels):\n if level is not None:\n color = self._hue_map(level)\n update(self.variables[\"hue\"], formatted_level, color=color)\n\n # -- Add a legend for size semantics\n brief_size = self._size_map.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(self._size_map.levels) > brief_ticks)\n )\n if brief_size:\n # Define how ticks will interpolate between the min/max data values\n if isinstance(self._size_map.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n # Define the min/max data values\n limits = min(self._size_map.levels), max(self._size_map.levels)\n size_levels, size_formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[\"size\"].infer_objects().dtype\n )\n elif self._size_map.levels is None:\n size_levels = size_formatted_levels = []\n else:\n size_levels = size_formatted_levels = self._size_map.levels\n\n # Add the size semantic subtitle\n if not legend_title and self.variables.get(\"size\", None) is not None:\n update((self.variables[\"size\"], \"title\"),\n self.variables[\"size\"], **title_kws)\n\n # Add the size semantic labels\n for level, formatted_level in zip(size_levels, size_formatted_levels):\n if level is not None:\n size = self._size_map(level)\n update(\n self.variables[\"size\"],\n formatted_level,\n linewidth=size,\n s=size,\n )\n\n # -- Add a legend for style semantics\n\n # Add the style semantic title\n if not legend_title and self.variables.get(\"style\", None) is 
not None:\n update((self.variables[\"style\"], \"title\"),\n self.variables[\"style\"], **title_kws)\n\n # Add the style semantic labels\n if self._style_map.levels is not None:\n for level in self._style_map.levels:\n if level is not None:\n attrs = self._style_map(level)\n update(\n self.variables[\"style\"],\n level,\n marker=attrs.get(\"marker\", \"\"),\n dashes=attrs.get(\"dashes\", \"\"),\n )\n\n func = getattr(ax, self._legend_func)\n\n legend_data = {}\n legend_order = []\n\n for key in keys:\n\n _, label = key\n kws = legend_kwargs[key]\n kws.setdefault(\"color\", \".2\")\n use_kws = {}\n for attr in self._legend_attributes + [\"visible\"]:\n if attr in kws:\n use_kws[attr] = kws[attr]\n artist = func([], [], label=label, **use_kws)\n if self._legend_func == \"plot\":\n artist = artist[0]\n legend_data[key] = artist\n legend_order.append(key)\n\n self.legend_title = legend_title\n self.legend_data = legend_data\n self.legend_order = legend_order\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 245, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", "info": " hue_levels, hue_formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 246, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, self.plot_data[\"hue\"].infer_objects().dtype\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 261, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 277, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", "info": " size_levels, size_formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 278, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, self.plot_data[\"size\"].infer_objects().dtype\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 293, "name": "_size_map", "kind": "ref", "category": "function", "info": " size = self._size_map(level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 312, "name": "_style_map", "kind": "ref", "category": "function", "info": " attrs = self._style_map(level)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 334, "name": "func", "kind": "ref", "category": "function", "info": " artist = func([], [], label=label, **use_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 345, "name": "_LinePlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 403, "name": "EstimateAggregator", "kind": "ref", 
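# Rough standalone illustration of the "brief" numeric legend logic in
# add_legend_data above: ask a matplotlib locator for about brief_ticks nice
# values across the level limits and keep those inside the limits. (Only a
# sketch; seaborn's actual helper is locator_to_legend_entries, and the
# levels here are invented.)
import numpy as np
import matplotlib as mpl

levels = np.array([3.1, 7.4, 12.9, 18.2, 25.0, 33.3, 41.7, 55.6])
limits = levels.min(), levels.max()

locator = mpl.ticker.MaxNLocator(nbins=6)
candidates = locator.tick_values(*limits)
entries = candidates[(candidates >= limits[0]) & (candidates <= limits[1])]
print(entries)   # a handful of round values between 3.1 and 55.6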
"category": "function", "info": " agg = EstimateAggregator(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 422, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 427, "name": "sort_values", "kind": "ref", "category": "function", "info": " sub_data = sub_data.sort_values(sort_cols)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 434, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped = sub_data.groupby(orient, sort=self.sort)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 437, "name": "apply", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, other).reset_index()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 437, "name": "reset_index", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, other).reset_index()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 441, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 449, "name": "groupby", "kind": "ref", "category": "function", "info": " for _, unit_data in sub_data.groupby(\"units\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 457, "name": "set_color", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 457, "name": "_hue_map", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 460, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 460, "name": "_size_map", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 463, "name": "_style_map", "kind": "ref", "category": "function", "info": " attributes = self._style_map(sub_vars[\"style\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 465, "name": "set_dashes", "kind": "ref", "category": "function", "info": " line.set_dashes(attributes[\"dashes\"])\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 467, "name": "set_marker", "kind": "ref", "category": "function", "info": " line.set_marker(attributes[\"marker\"])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 469, "name": "get_color", "kind": "ref", "category": "function", "info": " line_color = line.get_color()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 470, "name": "get_alpha", "kind": "ref", "category": "function", "info": " line_alpha = line.get_alpha()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 471, "name": "get_solid_capstyle", "kind": "ref", "category": "function", "info": " line_capstyle = line.get_solid_capstyle()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 482, "name": "func", "kind": "ref", "category": "function", "info": " func(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 503, "name": "get_children", "kind": "ref", "category": "function", "info": " for obj in ebars.get_children():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 505, "name": "set_capstyle", "kind": "ref", "category": "function", "info": " obj.set_capstyle(line_capstyle)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 508, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 510, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 511, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 514, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 517, "name": "_ScatterPlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 539, "name": "dropna", "kind": "ref", "category": "function", "info": " data = self.plot_data.dropna()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 553, "name": "_style_map", "kind": "ref", "category": "function", "info": " example_marker = self._style_map(example_level, \"marker\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 561, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 562, "name": "is_filled", "kind": "ref", "category": "function", "info": " if m.is_filled():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 571, "name": "set_facecolors", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 571, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 574, "name": "set_sizes", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 574, "name": "_size_map", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 577, "name": "_style_map", "kind": "ref", "category": "function", "info": " p = [self._style_map(val, \"path\") for val in data[\"style\"]]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 578, "name": "set_paths", "kind": "ref", "category": "function", "info": " points.set_paths(p)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 583, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 584, "name": "set_linewidths", "kind": "ref", "category": "function", "info": " points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 587, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 589, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 590, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 593, "name": "adjust_legend_subtitles", 
"kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 596, "name": "lineplot", "kind": "def", "category": "function", "info": "def lineplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n dashes=True, markers=None, style_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, seed=None,\n orient=\"x\", sort=True, err_style=\"band\", err_kws=None,\n legend=\"auto\", ci=\"deprecated\", ax=None, **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 608, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = _deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 610, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = _LinePlotter.get_semantics(locals())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 611, "name": "_LinePlotter", "kind": "ref", "category": "function", "info": " p = _LinePlotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 618, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 619, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 620, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, dashes=dashes, order=style_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 631, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 636, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 725, "name": "scatterplot", "kind": "def", "category": "function", "info": "def scatterplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=True, style_order=None, legend=\"auto\", ax=None,\n **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 734, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = 
_ScatterPlotter.get_semantics(locals())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 735, "name": "_ScatterPlotter", "kind": "ref", "category": "function", "info": " p = _ScatterPlotter(data=data, variables=variables, legend=legend)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 737, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 738, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 739, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, order=style_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 747, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 752, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 818, "name": "relplot", "kind": "def", "category": "function", "info": "def relplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n row=None, col=None, col_wrap=None, row_order=None, col_order=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=None, dashes=None, style_order=None,\n legend=\"auto\", kind=\"scatter\", height=5, aspect=1, facet_kws=None,\n **kwargs\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 855, "name": "plotter", "kind": "ref", "category": "function", "info": " p = plotter(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 857, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=plotter.get_semantics(locals()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 860, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 861, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 862, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, 
dashes=dashes, order=style_order)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 880, "name": "_style_map", "kind": "ref", "category": "function", "info": " markers = {k: p._style_map(k, \"marker\") for k in style_order}\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 884, "name": "_style_map", "kind": "ref", "category": "function", "info": " dashes = {k: p._style_map(k, \"dashes\") for k in style_order}\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 909, "name": "assign_variables", "kind": "ref", "category": "function", "info": " p.assign_variables(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 935, "name": "rename", "kind": "ref", "category": "function", "info": " full_data = p.plot_data.rename(columns=new_cols)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 939, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 940, "name": "dropna", "kind": "ref", "category": "function", "info": " data=full_data.dropna(axis=1, how=\"all\"),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 948, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(func, **plot_kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 958, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " p.add_legend_data(g.axes.flat[0])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 960, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(legend_data=p.legend_data,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 970, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = g.data.rename(columns=orig_cols)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 23, "name": "ci_to_errsize", "kind": "def", "category": "function", "info": "def ci_to_errsize(cis, heights):\n \"\"\"Convert intervals to error arguments relative to plot heights.\n\n Parameters\n ----------\n cis : 2 x n sequence\n sequence of confidence interval limits\n heights : n sequence\n sequence of plot heights\n\n Returns\n -------\n errsize : 2 x n array\n sequence of error size relative to height values in correct\n format as argument for plt.bar\n\n \"\"\"\n cis = np.atleast_2d(cis).reshape(2, -1)\n heights = np.atleast_1d(heights)\n errsize = []\n for i, (low, high) in enumerate(np.transpose(cis)):\n h = heights[i]\n elow = h - low\n ehigh = high - h\n errsize.append([elow, ehigh])\n\n errsize = np.asarray(errsize).T\n return errsize\n\n\n"}, {"fname": 
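# Worked example for ci_to_errsize above (numbers invented): two bars with
# heights 2 and 4 and confidence intervals (1, 3) and (3, 5) yield symmetric
# errors of 1 in the 2 x n layout that plt.bar's yerr expects.
import numpy as np

cis = np.atleast_2d([[1, 3], [3, 5]]).reshape(2, -1)   # row 0: lows, row 1: highs
heights = np.atleast_1d([2, 4])

errsize = np.asarray([
    [h - low, high - h]
    for (low, high), h in zip(np.transpose(cis), heights)
]).T
print(errsize)   # [[1 1]
                 #  [1 1]]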
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 53, "name": "_normal_quantile_func", "kind": "def", "category": "function", "info": "def _normal_quantile_func(q):\n \"\"\"\n Compute the quantile function of the standard normal distribution.\n\n This wrapper exists because we are dropping scipy as a mandatory dependency\n but statistics.NormalDist was added to the standard library in 3.8.\n\n \"\"\"\n try:\n from statistics import NormalDist\n qf = np.vectorize(NormalDist().inv_cdf)\n except ImportError:\n try:\n from scipy.stats import norm\n qf = norm.ppf\n except ImportError:\n msg = (\n \"Standard normal quantile functions require either Python>=3.8 or scipy\"\n )\n raise RuntimeError(msg)\n return qf(q)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 73, "name": "qf", "kind": "ref", "category": "function", "info": " return qf(q)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 76, "name": "_draw_figure", "kind": "def", "category": "function", "info": "def _draw_figure(fig):\n \"\"\"Force draw of a matplotlib figure, accounting for back-compat.\"\"\"\n # See https://github.com/matplotlib/matplotlib/issues/19197 for context\n fig.canvas.draw()\n if fig.stale:\n try:\n fig.draw(fig.canvas.get_renderer())\n except AttributeError:\n pass\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 82, "name": "get_renderer", "kind": "ref", "category": "function", "info": " fig.draw(fig.canvas.get_renderer())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 87, "name": "_default_color", "kind": "def", "category": "function", "info": "def _default_color(method, hue, color, kws):\n \"\"\"If needed, get a default color by using the matplotlib property cycle.\"\"\"\n if hue is not None:\n # This warning is probably user-friendly, but it's currently triggered\n # in a FacetGrid context and I don't want to mess with that logic right now\n # if color is not None:\n # msg = \"`color` is ignored when `hue` is assigned.\"\n # warnings.warn(msg)\n return None\n\n if color is not None:\n return color\n\n elif method.__name__ == \"plot\":\n\n scout, = method([], [], scalex=False, scaley=False, **kws)\n color = scout.get_color()\n scout.remove()\n\n elif method.__name__ == \"scatter\":\n\n # Matplotlib will raise if the size of x/y don't match s/c,\n # and the latter might be in the kws dict\n scout_size = max(\n np.atleast_1d(kws.get(key, [])).shape[0]\n for key in [\"s\", \"c\", \"fc\", \"facecolor\", \"facecolors\"]\n )\n scout_x = scout_y = np.full(scout_size, np.nan)\n\n scout = method(scout_x, scout_y, **kws)\n facecolors = scout.get_facecolors()\n\n if not len(facecolors):\n # Handle bug in matplotlib <= 3.2 (I think)\n # This will limit the ability to use non color= kwargs to specify\n # a color in versions of matplotlib with the bug, but trying to\n # work out what the user wanted by re-implementing the broken logic\n # of inspecting the kwargs is probably too brittle.\n single_color = False\n else:\n single_color = np.unique(facecolors, axis=0).shape[0] == 1\n\n # Allow the user to specify an array of colors through various kwargs\n if \"c\" not in kws and single_color:\n color = to_rgb(facecolors[0])\n\n 
scout.remove()\n\n elif method.__name__ == \"bar\":\n\n # bar() needs masked, not empty data, to generate a patch\n scout, = method([np.nan], [np.nan], **kws)\n color = to_rgb(scout.get_facecolor())\n scout.remove()\n\n elif method.__name__ == \"fill_between\":\n\n # There is a bug on matplotlib < 3.3 where fill_between with\n # datetime units and empty data will set incorrect autoscale limits\n # To workaround it, we'll always return the first color in the cycle.\n # https://github.com/matplotlib/matplotlib/issues/17586\n ax = method.__self__\n datetime_axis = any([\n isinstance(ax.xaxis.converter, mpl.dates.DateConverter),\n isinstance(ax.yaxis.converter, mpl.dates.DateConverter),\n ])\n if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n return \"C0\"\n\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n\n scout = method([], [], **kws)\n facecolor = scout.get_facecolor()\n color = to_rgb(facecolor[0])\n scout.remove()\n\n return color\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 102, "name": "method", "kind": "ref", "category": "function", "info": " scout, = method([], [], scalex=False, scaley=False, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 103, "name": "get_color", "kind": "ref", "category": "function", "info": " color = scout.get_color()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 116, "name": "method", "kind": "ref", "category": "function", "info": " scout = method(scout_x, scout_y, **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 117, "name": "get_facecolors", "kind": "ref", "category": "function", "info": " facecolors = scout.get_facecolors()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 138, "name": "method", "kind": "ref", "category": "function", "info": " scout, = method([np.nan], [np.nan], **kws)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 139, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " color = to_rgb(scout.get_facecolor())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 153, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 153, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 156, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 158, "name": "method", "kind": "ref", "category": "function", "info": " scout = method([], [], **kws)\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 159, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " facecolor = scout.get_facecolor()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 166, "name": "desaturate", "kind": "def", "category": "function", "info": "def desaturate(color, prop):\n \"\"\"Decrease the saturation channel of a color by some percent.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n prop : float\n saturation channel of color will be multiplied by this value\n\n Returns\n -------\n new_color : rgb tuple\n desaturated color code in RGB tuple representation\n\n \"\"\"\n # Check inputs\n if not 0 <= prop <= 1:\n raise ValueError(\"prop must be between 0 and 1\")\n\n # Get rgb tuple rep\n rgb = to_rgb(color)\n\n # Convert to hls\n h, l, s = colorsys.rgb_to_hls(*rgb)\n\n # Desaturate the saturation channel\n s *= prop\n\n # Convert back to rgb\n new_color = colorsys.hls_to_rgb(h, l, s)\n\n return new_color\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 201, "name": "saturate", "kind": "def", "category": "function", "info": "def saturate(color):\n \"\"\"Return a fully saturated color with the same hue.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n\n Returns\n -------\n new_color : rgb tuple\n saturated color code in RGB tuple representation\n\n \"\"\"\n return set_hls_values(color, s=1)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 215, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " return set_hls_values(color, s=1)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 218, "name": "set_hls_values", "kind": "def", "category": "function", "info": "def set_hls_values(color, h=None, l=None, s=None): # noqa\n \"\"\"Independently manipulate the h, l, or s channels of a color.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n h, l, s : floats between 0 and 1, or None\n new values for each channel in hls space\n\n Returns\n -------\n new_color : rgb tuple\n new color code in RGB tuple representation\n\n \"\"\"\n # Get an RGB tuple representation\n rgb = to_rgb(color)\n vals = list(colorsys.rgb_to_hls(*rgb))\n for i, val in enumerate([h, l, s]):\n if val is not None:\n vals[i] = val\n\n rgb = colorsys.hls_to_rgb(*vals)\n return rgb\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 245, "name": "axlabel", "kind": "def", "category": "function", "info": "def axlabel(xlabel, ylabel, **kwargs):\n \"\"\"Grab current axis and label it.\n\n DEPRECATED: will be removed in a future version.\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg, FutureWarning)\n ax = plt.gca()\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 254, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " 
# refs: set_xlabel (254), set_ylabel (255)


def remove_na(vector):             # line 258
    """Helper method for removing null values from data vectors.

    Parameters
    ----------
    vector : vector object
        Must implement boolean masking with [] subscript syntax.

    Returns
    -------
    clean_vector : same type as ``vector``
        Vector of data with null values removed. May be a copy or a view.

    """
    return vector[pd.notnull(vector)]


def get_color_cycle():             # line 275
    """Return the list of colors in the current matplotlib color cycle

    Parameters
    ----------
    None

    Returns
    -------
    colors : list
        List of matplotlib colors in the current cycle, or dark gray if
        the current color cycle is empty.
    """
    cycler = mpl.rcParams['axes.prop_cycle']
    return cycler.by_key()['color'] if 'color' in cycler.keys else [".15"]


def despine(fig=None, ax=None, top=True, right=True, left=False,   # line 292
            bottom=False, offset=None, trim=False):
    """Remove the top and right spines from plot(s).

    Parameters
    ----------
    fig : matplotlib figure, optional
        Figure to despine all axes of, defaults to the current figure.
    ax : matplotlib axes, optional
        Specific axes object to despine. Ignored if fig is provided.
    top, right, left, bottom : boolean, optional
        If True, remove that spine.
    offset : int or dict, optional
        Absolute distance, in points, spines should be moved away
        from the axes (negative values move spines inward). A single value
        applies to all spines; a dict can be used to set offset values per
        side.
    trim : bool, optional
        If True, limit spines to the smallest and largest major tick
        on each non-despined axis.

    Returns
    -------
    None

    """
    # Get references to the axes we want
    if fig is None and ax is None:
        axes = plt.gcf().axes
    elif fig is not None:
        axes = fig.axes
    elif ax is not None:
        axes = [ax]

    for ax_i in axes:
        for side in ["top", "right", "left", "bottom"]:
            # Toggle the spine objects
            is_visible = not locals()[side]
            ax_i.spines[side].set_visible(is_visible)
            if offset is not None and is_visible:
                try:
                    val = offset.get(side, 0)
                except AttributeError:
                    val = offset
                ax_i.spines[side].set_position(('outward', val))

        # Potentially move the ticks
        if left and not right:
            maj_on = any(
                t.tick1line.get_visible()
                for t in ax_i.yaxis.majorTicks
            )
            min_on = any(
                t.tick1line.get_visible()
                for t in ax_i.yaxis.minorTicks
            )
            ax_i.yaxis.set_ticks_position("right")
            for t in ax_i.yaxis.majorTicks:
                t.tick2line.set_visible(maj_on)
            for t in ax_i.yaxis.minorTicks:
                t.tick2line.set_visible(min_on)

        if bottom and not top:
            maj_on = any(
                t.tick1line.get_visible()
                for t in ax_i.xaxis.majorTicks
            )
            min_on = any(
                t.tick1line.get_visible()
                for t in ax_i.xaxis.minorTicks
            )
            ax_i.xaxis.set_ticks_position("top")
            for t in ax_i.xaxis.majorTicks:
                t.tick2line.set_visible(maj_on)
            for t in ax_i.xaxis.minorTicks:
                t.tick2line.set_visible(min_on)

        if trim:
            # clip off the parts of the spines that extend past major ticks
            xticks = np.asarray(ax_i.get_xticks())
            if xticks.size:
                firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
                                        xticks)[0]
                lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
                                       xticks)[-1]
                ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
                ax_i.spines['top'].set_bounds(firsttick, lasttick)
                newticks = xticks.compress(xticks <= lasttick)
                newticks = newticks.compress(newticks >= firsttick)
                ax_i.set_xticks(newticks)

            yticks = np.asarray(ax_i.get_yticks())
            if yticks.size:
                firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
                                        yticks)[0]
                lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
                                       yticks)[-1]
                ax_i.spines['left'].set_bounds(firsttick, lasttick)
                ax_i.spines['right'].set_bounds(firsttick, lasttick)
                newticks = yticks.compress(yticks <= lasttick)
                newticks = newticks.compress(newticks >= firsttick)
                ax_i.set_yticks(newticks)

# refs inside despine: set_visible (328, 348, 350, 363, 365),
# set_position (334), get_visible (339, 343, 354, 358),
# set_ticks_position (346, 361), get_xticks (369), get_xlim (371, 373),
# set_bounds (375, 376), set_xticks (379)
"rel_fname": "seaborn/utils.py", "line": 381, "name": "get_yticks", "kind": "ref", "category": "function", "info": " yticks = np.asarray(ax_i.get_yticks())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 383, "name": "get_ylim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 385, "name": "get_ylim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 387, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['left'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 388, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['right'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 391, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax_i.set_yticks(newticks)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 394, "name": "move_legend", "kind": "def", "category": "function", "info": "def move_legend(obj, loc, **kwargs):\n \"\"\"\n Recreate a plot's legend at a new location.\n\n The name is a slight misnomer. Matplotlib legends do not expose public\n control over their position parameters. So this function creates a new legend,\n copying over the data from the original object, which is then removed.\n\n Parameters\n ----------\n obj : the object with the plot\n This argument can be either a seaborn or matplotlib object:\n\n - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`\n - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`\n\n loc : str or int\n Location argument, as in :meth:`matplotlib.axes.Axes.legend`.\n\n kwargs\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/move_legend.rst\n\n \"\"\"\n # This is a somewhat hackish solution that will hopefully be obviated by\n # upstream improvements to matplotlib legends that make them easier to\n # modify after creation.\n\n from seaborn.axisgrid import Grid # Avoid circular import\n\n # Locate the legend object and a method to recreate the legend\n if isinstance(obj, Grid):\n old_legend = obj.legend\n legend_func = obj.figure.legend\n elif isinstance(obj, mpl.axes.Axes):\n old_legend = obj.legend_\n legend_func = obj.legend\n elif isinstance(obj, mpl.figure.Figure):\n if obj.legends:\n old_legend = obj.legends[-1]\n else:\n old_legend = None\n legend_func = obj.legend\n else:\n err = \"`obj` must be a seaborn Grid or matplotlib Axes or Figure instance.\"\n raise TypeError(err)\n\n if old_legend is None:\n err = f\"{obj} has no legend attached.\"\n raise ValueError(err)\n\n # Extract the components of the legend we need to reuse\n handles = old_legend.legendHandles\n labels = [t.get_text() for t in old_legend.get_texts()]\n\n # Extract legend properties that can be passed to the recreation method\n # (Vexingly, these don't all round-trip)\n legend_kws = inspect.signature(mpl.legend.Legend).parameters\n props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n\n # Delegate default bbox_to_anchor rules to matplotlib\n props.pop(\"bbox_to_anchor\")\n\n # Try to propagate the existing title and font properties; respect new ones too\n title = props.pop(\"title\")\n if \"title\" in kwargs:\n title.set_text(kwargs.pop(\"title\"))\n title_kwargs = {k: v for k, v in kwargs.items() if k.startswith(\"title_\")}\n for key, val in title_kwargs.items():\n title.set(**{key[6:]: val})\n kwargs.pop(key)\n\n # Try to respect the frame visibility\n kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n\n # Remove the old legend and create the new one\n props.update(kwargs)\n old_legend.remove()\n new_legend = legend_func(handles, labels, loc=loc, **props)\n new_legend.set_title(title.get_text(), title.get_fontproperties())\n\n # Let the Grid object continue to track the correct legend object\n if isinstance(obj, Grid):\n obj._legend = new_legend\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 451, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 451, "name": "get_texts", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 456, "name": "properties", "kind": "ref", "category": "function", "info": " props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 464, "name": "set_text", "kind": "ref", "category": "function", "info": " title.set_text(kwargs.pop(\"title\"))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 471, "name": "get_visible", "kind": "ref", "category": "function", "info": " kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n"}, 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 476, "name": "legend_func", "kind": "ref", "category": "function", "info": " new_legend = legend_func(handles, labels, loc=loc, **props)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 477, "name": "set_title", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 477, "name": "get_text", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 477, "name": "get_fontproperties", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 484, "name": "_kde_support", "kind": "def", "category": "function", "info": "def _kde_support(data, bw, gridsize, cut, clip):\n \"\"\"Establish support for a kernel density estimate.\"\"\"\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n support = np.linspace(support_min, support_max, gridsize)\n\n return support\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 493, "name": "ci", "kind": "def", "category": "function", "info": "def ci(a, which=95, axis=None):\n \"\"\"Return a percentile range from an array of values.\"\"\"\n p = 50 - which / 2, 50 + which / 2\n return np.nanpercentile(a, p, axis)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 499, "name": "get_dataset_names", "kind": "def", "category": "function", "info": "def get_dataset_names():\n \"\"\"Report available example datasets, useful for reporting issues.\n\n Requires an internet connection.\n\n \"\"\"\n url = \"https://github.com/mwaskom/seaborn-data\"\n with urlopen(url) as resp:\n html = resp.read()\n\n pat = r\"/mwaskom/seaborn-data/blob/master/(\\w*).csv\"\n datasets = re.findall(pat, html.decode())\n return datasets\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 510, "name": "decode", "kind": "ref", "category": "function", "info": " datasets = re.findall(pat, html.decode())\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 514, "name": "get_data_home", "kind": "def", "category": "function", "info": "def get_data_home(data_home=None):\n \"\"\"Return a path to the cache directory for example datasets.\n\n This directory is used by :func:`load_dataset`.\n\n If the ``data_home`` argument is not provided, it will use a directory\n specified by the `SEABORN_DATA` environment variable (if it exists)\n or otherwise default to an OS-appropriate user cache location.\n\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n data_home = os.path.expanduser(data_home)\n if not 
os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 525, "name": "user_cache_dir", "kind": "ref", "category": "function", "info": " data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 526, "name": "expanduser", "kind": "ref", "category": "function", "info": " data_home = os.path.expanduser(data_home)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 527, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(data_home):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 532, "name": "load_dataset", "kind": "def", "category": "function", "info": "def load_dataset(name, cache=True, data_home=None, **kws):\n \"\"\"Load an example dataset from the online repository (requires internet).\n\n This function provides quick access to a small number of example datasets\n that are useful for documenting seaborn or generating reproducible examples\n for bug reports. It is not necessary for normal usage.\n\n Note that some of the datasets have a small amount of preprocessing applied\n to define a proper ordering for categorical variables.\n\n Use :func:`get_dataset_names` to see a list of available datasets.\n\n Parameters\n ----------\n name : str\n Name of the dataset (``{name}.csv`` on\n https://github.com/mwaskom/seaborn-data).\n cache : boolean, optional\n If True, try to load from the local cache first, and save to the cache\n if a download is required.\n data_home : string, optional\n The directory in which to cache data; see :func:`get_data_home`.\n kws : keys and values, optional\n Additional keyword arguments are passed through to\n :func:`pandas.read_csv`.\n\n Returns\n -------\n df : :class:`pandas.DataFrame`\n Tabular data, possibly with some preprocessing applied.\n\n \"\"\"\n # A common beginner mistake is to assume that one's personal data needs\n # to be passed through this function to be usable with seaborn.\n # Let's provide a more helpful error than you would otherwise get.\n if isinstance(name, pd.DataFrame):\n err = (\n \"This function accepts only strings (the name of an example dataset). \"\n \"You passed a pandas DataFrame. 
If you have your own dataset, \"\n \"it is not necessary to use this function before plotting.\"\n )\n raise TypeError(err)\n\n url = f\"https://raw.githubusercontent.com/mwaskom/seaborn-data/master/{name}.csv\"\n\n if cache:\n cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n if not os.path.exists(cache_path):\n if name not in get_dataset_names():\n raise ValueError(f\"'{name}' is not one of the example datasets.\")\n urlretrieve(url, cache_path)\n full_path = cache_path\n else:\n full_path = url\n\n df = pd.read_csv(full_path, **kws)\n\n if df.iloc[-1].isnull().all():\n df = df.iloc[:-1]\n\n # Set some columns as a categorical type with ordered levels\n\n if name == \"tips\":\n df[\"day\"] = pd.Categorical(df[\"day\"], [\"Thur\", \"Fri\", \"Sat\", \"Sun\"])\n df[\"sex\"] = pd.Categorical(df[\"sex\"], [\"Male\", \"Female\"])\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"Lunch\", \"Dinner\"])\n df[\"smoker\"] = pd.Categorical(df[\"smoker\"], [\"Yes\", \"No\"])\n\n elif name == \"flights\":\n months = df[\"month\"].str[:3]\n df[\"month\"] = pd.Categorical(months, months.unique())\n\n elif name == \"exercise\":\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"1 min\", \"15 min\", \"30 min\"])\n df[\"kind\"] = pd.Categorical(df[\"kind\"], [\"rest\", \"walking\", \"running\"])\n df[\"diet\"] = pd.Categorical(df[\"diet\"], [\"no fat\", \"low fat\"])\n\n elif name == \"titanic\":\n df[\"class\"] = pd.Categorical(df[\"class\"], [\"First\", \"Second\", \"Third\"])\n df[\"deck\"] = pd.Categorical(df[\"deck\"], list(\"ABCDEFG\"))\n\n elif name == \"penguins\":\n df[\"sex\"] = df[\"sex\"].str.title()\n\n elif name == \"diamonds\":\n df[\"color\"] = pd.Categorical(\n df[\"color\"], [\"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n )\n df[\"clarity\"] = pd.Categorical(\n df[\"clarity\"], [\"IF\", \"VVS1\", \"VVS2\", \"VS1\", \"VS2\", \"SI1\", \"SI2\", \"I1\"],\n )\n df[\"cut\"] = pd.Categorical(\n df[\"cut\"], [\"Ideal\", \"Premium\", \"Very Good\", \"Good\", \"Fair\"],\n )\n\n elif name == \"taxis\":\n df[\"pickup\"] = pd.to_datetime(df[\"pickup\"])\n df[\"dropoff\"] = pd.to_datetime(df[\"dropoff\"])\n\n elif name == \"seaice\":\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n elif name == \"dowjones\":\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n return df\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 578, "name": "get_data_home", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 578, "name": "basename", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 579, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(cache_path):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 580, "name": "get_dataset_names", "kind": "ref", "category": "function", "info": " if name not in get_dataset_names():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 640, "name": 
"axis_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axis_ticklabels_overlap(labels):\n \"\"\"Return a boolean for whether the list of ticklabels have overlaps.\n\n Parameters\n ----------\n labels : list of matplotlib ticklabels\n\n Returns\n -------\n overlap : boolean\n True if any of the labels overlap.\n\n \"\"\"\n if not labels:\n return False\n try:\n bboxes = [l.get_window_extent() for l in labels]\n overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n return max(overlaps) > 1\n except RuntimeError:\n # Issue on macos backend raises an error in the above code\n return False\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 656, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " bboxes = [l.get_window_extent() for l in labels]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 657, "name": "count_overlaps", "kind": "ref", "category": "function", "info": " overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 664, "name": "axes_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axes_ticklabels_overlap(ax):\n \"\"\"Return booleans for whether the x and y ticklabels on an Axes overlap.\n\n Parameters\n ----------\n ax : matplotlib Axes\n\n Returns\n -------\n x_overlap, y_overlap : booleans\n True when the labels on that axis overlap.\n\n \"\"\"\n return (axis_ticklabels_overlap(ax.get_xticklabels()),\n axis_ticklabels_overlap(ax.get_yticklabels()))\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 677, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 677, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 678, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 678, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 681, "name": "locator_to_legend_entries", "kind": "def", "category": "function", "info": "def locator_to_legend_entries(locator, limits, dtype):\n \"\"\"Return levels and formatted levels for brief numeric legends.\"\"\"\n raw_levels = locator.tick_values(*limits).astype(dtype)\n\n # The locator can return ticks outside the limits, clip them here\n raw_levels = [l for l in raw_levels if l >= limits[0] and l <= limits[1]]\n\n class dummy_axis:\n def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n 
else:\n formatter = mpl.ticker.ScalarFormatter()\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 683, "name": "tick_values", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 683, "name": "astype", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 688, "name": "dummy_axis", "kind": "def", "category": "class", "info": "get_view_interval"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 689, "name": "get_view_interval", "kind": "def", "category": "function", "info": " def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n else:\n formatter = mpl.ticker.ScalarFormatter()\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 696, "name": "dummy_axis", "kind": "ref", "category": "function", "info": " formatter.axis = dummy_axis()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 701, "name": "set_locs", "kind": "ref", "category": "function", "info": " formatter.set_locs(raw_levels)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 702, "name": "formatter", "kind": "ref", "category": "function", "info": " formatted_levels = [formatter(x) for x in raw_levels]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 707, "name": "relative_luminance", "kind": "def", "category": "function", "info": "def relative_luminance(color):\n \"\"\"Calculate the relative luminance of a color according to W3C standards\n\n Parameters\n ----------\n color : matplotlib color or sequence of matplotlib colors\n Hex code, rgb-tuple, or html color name.\n\n Returns\n -------\n luminance : float(s) between 0 and 1\n\n \"\"\"\n rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)\n lum = rgb.dot([.2126, .7152, .0722])\n try:\n return lum.item()\n except ValueError:\n return lum\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 720, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " rgb = 
mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 724, "name": "item", "kind": "ref", "category": "function", "info": " return lum.item()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 729, "name": "to_utf8", "kind": "def", "category": "function", "info": "def to_utf8(obj):\n \"\"\"Return a string representing a Python object.\n\n Strings (i.e. type ``str``) are returned unchanged.\n\n Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.\n\n For other objects, the method ``__str__()`` is called, and the result is\n returned as a string.\n\n Parameters\n ----------\n obj : object\n Any Python object\n\n Returns\n -------\n s : str\n UTF-8-decoded string representation of ``obj``\n\n \"\"\"\n if isinstance(obj, str):\n return obj\n try:\n return obj.decode(encoding=\"utf-8\")\n except AttributeError: # obj is not bytes-like\n return str(obj)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 753, "name": "decode", "kind": "ref", "category": "function", "info": " return obj.decode(encoding=\"utf-8\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 758, "name": "_normalize_kwargs", "kind": "def", "category": "function", "info": "def _normalize_kwargs(kws, artist):\n \"\"\"Wrapper for mpl.cbook.normalize_kwargs that supports <= 3.2.1.\"\"\"\n _alias_map = {\n 'color': ['c'],\n 'linewidth': ['lw'],\n 'linestyle': ['ls'],\n 'facecolor': ['fc'],\n 'edgecolor': ['ec'],\n 'markerfacecolor': ['mfc'],\n 'markeredgecolor': ['mec'],\n 'markeredgewidth': ['mew'],\n 'markersize': ['ms']\n }\n try:\n kws = normalize_kwargs(kws, artist)\n except AttributeError:\n kws = normalize_kwargs(kws, _alias_map)\n return kws\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 778, "name": "_check_argument", "kind": "def", "category": "function", "info": "def _check_argument(param, options, value):\n \"\"\"Raise if value for param is not in options.\"\"\"\n if value not in options:\n raise ValueError(\n f\"`{param}` must be one of {options}, but {repr(value)} was passed.\"\n )\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 786, "name": "_assign_default_kwargs", "kind": "def", "category": "function", "info": "def _assign_default_kwargs(kws, call_func, source_func):\n \"\"\"Assign default kwargs for call_func using values from source_func.\"\"\"\n # This exists so that axes-level functions and figure-level functions can\n # both call a Plotter method while having the default kwargs be defined in\n # the signature of the axes-level function.\n # An alternative would be to have a decorator on the method that sets its\n # defaults based on those defined in the axes-level function.\n # Then the figure-level function would not need to worry about defaults.\n # I am not sure which is better.\n needed = inspect.signature(call_func).parameters\n defaults = inspect.signature(source_func).parameters\n\n for param in needed:\n if param in defaults and param not in kws:\n kws[param] = defaults[param].default\n\n return kws\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 805, "name": "adjust_legend_subtitles", "kind": "def", "category": "function", "info": "def adjust_legend_subtitles(legend):\n \"\"\"\n Make invisible-handle \"subtitles\" entries look more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n \"\"\"\n # Legend title not in rcParams until 3.0\n font_size = plt.rcParams.get(\"legend.title_fontsize\", None)\n hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n for hpack in hpackers:\n draw_area, text_area = hpack.get_children()\n handles = draw_area.get_children()\n if not all(artist.get_visible() for artist in handles):\n draw_area.set_width(0)\n for text in text_area.get_children():\n if font_size is not None:\n text.set_size(font_size)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 814, "name": "get_children", "kind": "ref", "category": "function", "info": " hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 816, "name": "get_children", "kind": "ref", "category": "function", "info": " draw_area, text_area = hpack.get_children()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 817, "name": "get_children", "kind": "ref", "category": "function", "info": " handles = draw_area.get_children()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 818, "name": "get_visible", "kind": "ref", "category": "function", "info": " if not all(artist.get_visible() for artist in handles):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 819, "name": "set_width", "kind": "ref", "category": "function", "info": " draw_area.set_width(0)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 820, "name": "get_children", "kind": "ref", "category": "function", "info": " for text in text_area.get_children():\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 822, "name": "set_size", "kind": "ref", "category": "function", "info": " text.set_size(font_size)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 825, "name": "_deprecate_ci", "kind": "def", "category": "function", "info": "def _deprecate_ci(errorbar, ci):\n \"\"\"\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n \"\"\"\n if ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n msg = (\n \"\\n\\nThe `ci` parameter is deprecated. 
\"\n f\"Use `errorbar={repr(errorbar)}` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return errorbar\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 851, "name": "_disable_autolayout", "kind": "def", "category": "function", "info": "def _disable_autolayout():\n \"\"\"Context manager for preventing rc-controlled auto-layout behavior.\"\"\"\n # This is a workaround for an issue in matplotlib, for details see\n # https://github.com/mwaskom/seaborn/issues/2914\n # The only affect of this rcParam is to set the default value for\n # layout= in plt.figure, so we could just do that instead.\n # But then we would need to own the complexity of the transition\n # from tight_layout=True -> layout=\"tight\". This seems easier,\n # but can be removed when (if) that is simpler on the matplotlib side,\n # or if the layout algorithms are improved to handle figure legends.\n orig_val = mpl.rcParams[\"figure.autolayout\"]\n try:\n mpl.rcParams[\"figure.autolayout\"] = False\n yield\n finally:\n mpl.rcParams[\"figure.autolayout\"] = orig_val\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 35, "name": "_init_mutable_colormap", "kind": "def", "category": "function", "info": "def _init_mutable_colormap():\n \"\"\"Create a matplotlib colormap that will be updated by the widgets.\"\"\"\n greys = color_palette(\"Greys\", 256)\n cmap = LinearSegmentedColormap.from_list(\"interactive\", greys)\n cmap._init()\n cmap._set_extremes()\n return cmap\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 37, "name": "color_palette", "kind": "ref", "category": "function", "info": " greys = color_palette(\"Greys\", 256)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 44, "name": "_update_lut", "kind": "def", "category": "function", "info": "def _update_lut(cmap, colors):\n \"\"\"Change the LUT values in a matplotlib colormap in-place.\"\"\"\n cmap._lut[:256] = colors\n cmap._set_extremes()\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 50, "name": "_show_cmap", "kind": "def", "category": "function", "info": "def _show_cmap(cmap):\n \"\"\"Show a continuous matplotlib colormap.\"\"\"\n from .rcmod import axes_style # Avoid circular import\n with axes_style(\"white\"):\n f, ax = plt.subplots(figsize=(8.25, .75))\n ax.set(xticks=[], yticks=[])\n x = np.linspace(0, 1, 256)[np.newaxis, :]\n ax.pcolormesh(x, cmap=cmap)\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 53, "name": "axes_style", "kind": "ref", "category": "function", "info": " with axes_style(\"white\"):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 60, "name": "choose_colorbrewer_palette", "kind": "def", "category": "function", "info": "def choose_colorbrewer_palette(data_type, as_cmap=False):\n \"\"\"Select a palette from the ColorBrewer set.\n\n These palettes are built into matplotlib and can be used by name in\n many seaborn functions, or by passing the object returned by this function.\n\n Parameters\n 
----------\n data_type : {'sequential', 'diverging', 'qualitative'}\n This describes the kind of data you want to visualize. See the seaborn\n color palette docs for more information about how to choose this value.\n Note that you can pass substrings (e.g. 'q' for 'qualitative').\n\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette from selected colors.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n\n \"\"\"\n if data_type.startswith(\"q\") and as_cmap:\n raise ValueError(\"Qualitative palettes cannot be colormaps.\")\n\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if data_type.startswith(\"s\"):\n opts = [\"Greys\", \"Reds\", \"Greens\", \"Blues\", \"Oranges\", \"Purples\",\n \"BuGn\", \"BuPu\", \"GnBu\", \"OrRd\", \"PuBu\", \"PuRd\", \"RdPu\", \"YlGn\",\n \"PuBuGn\", \"YlGnBu\", \"YlOrBr\", \"YlOrRd\"]\n variants = [\"regular\", \"reverse\", \"dark\"]\n\n @interact\n def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 97, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 106, "name": "choose_sequential", "kind": "def", "category": "function", "info": " def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", 
\"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 107, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 115, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 116, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 117, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 119, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 120, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 128, "name": "choose_diverging", "kind": "def", "category": "function", "info": " def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 129, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", 
"rel_fname": "seaborn/widgets.py", "line": 134, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 135, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 136, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 138, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 139, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 146, "name": "choose_qualitative", "kind": "def", "category": "function", "info": " def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 147, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1)):\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 148, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 149, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 156, "name": "choose_dark_palette", "kind": "def", "category": "function", "info": "def choose_dark_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a dark sequential palette.\n\n This corresponds with the :func:`dark_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`dark_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 189, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 193, "name": "choose_dark_palette_rgb", "kind": "def", "category": "function", "info": " def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 199, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"rgb\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": 
"seaborn/widgets.py", "line": 200, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 201, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 203, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 204, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 208, "name": "choose_dark_palette_hls", "kind": "def", "category": "function", "info": " def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 214, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 215, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 216, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 218, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 219, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 223, "name": "choose_dark_palette_husl", "kind": "def", "category": "function", "info": " def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 229, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 230, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 231, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 233, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 234, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 241, "name": "choose_light_palette", "kind": "def", "category": "function", "info": "def choose_light_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a light sequential palette.\n\n This corresponds with the :func:`light_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`light_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n dark_palette : Create a sequential palette with dark low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 274, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 278, "name": "choose_light_palette_rgb", "kind": "def", "category": "function", "info": " def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 284, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"rgb\")\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 285, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 286, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 288, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 289, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 293, "name": "choose_light_palette_hls", "kind": "def", "category": "function", "info": " def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 299, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 300, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 301, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 303, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 304, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 308, "name": "choose_light_palette_husl", "kind": "def", "category": "function", "info": " def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = 
light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 314, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 315, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 316, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 318, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 319, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 326, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": "def choose_diverging_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to choose a diverging color palette.\n\n This corresponds with the :func:`diverging_palette` function. This kind\n of palette is good for data that range between interesting low values\n and interesting high values with a meaningful midpoint. 
(For example,\n change scores relative to some baseline value).\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n diverging_palette : Create a diverging color palette or colormap.\n choose_colorbrewer_palette : Interactively choose palettes from the\n colorbrewer set, including diverging palettes.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 356, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 359, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": " def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 360, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_neg=IntSlider(min=0,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 363, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_pos=IntSlider(min=0,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 366, "name": "IntSlider", "kind": "ref", "category": "function", "info": " s=IntSlider(min=0, max=99, value=74),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 367, "name": "IntSlider", "kind": "ref", "category": "function", "info": " l=IntSlider(min=0, max=99, value=50), # noqa: E741\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 368, "name": "IntSlider", "kind": "ref", "category": "function", "info": " sep=IntSlider(min=1, max=50, value=10),\n"}, 
{"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 373, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 374, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 375, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 377, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 378, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 385, "name": "choose_cubehelix_palette", "kind": "def", "category": "function", "info": "def choose_cubehelix_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to create a sequential cubehelix palette.\n\n This corresponds with the :func:`cubehelix_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values. 
The cubehelix system allows the\n palette to have more hue variance across the range, which can be helpful\n for distinguishing a wider range of values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 415, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 418, "name": "choose_cubehelix", "kind": "def", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 418, "name": "IntSlider", "kind": "ref", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 419, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " start=FloatSlider(min=0, max=3, value=0),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 420, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " rot=FloatSlider(min=-1, max=1, value=.4),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 421, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " gamma=FloatSlider(min=0, max=5, value=1),\n"}, {"fname": 
"playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 422, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " hue=FloatSlider(min=0, max=1, value=.8),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 423, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " light=FloatSlider(min=0, max=1, value=.85),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 424, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " dark=FloatSlider(min=0, max=1, value=.15),\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 428, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = cubehelix_palette(256, start, rot, gamma,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 430, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 431, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 433, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n"}, {"fname": "playground/51bf0751-1cdd-4394-921b-9633c2448f22/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 435, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}] \ No newline at end of file diff --git a/tags_mwaskom__seaborn-3190.json b/tags_mwaskom__seaborn-3190.json new file mode 100644 index 0000000000000000000000000000000000000000..089d02a70f1252eb5e5571127e1ed00ede4ba94e --- /dev/null +++ b/tags_mwaskom__seaborn-3190.json @@ -0,0 +1 @@ +[{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 11, "name": "rglob", "kind": "ref", "category": "function", "info": "py_files = path.rglob(\"*.py\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 12, "name": "rglob", "kind": "ref", "category": "function", "info": "ipynb_files = path.rglob(\"*.ipynb\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 18, "name": "read", "kind": "ref", "category": "function", "info": " datasets += re.findall(r\"load_dataset\\(['\\\"](\\w+)['\\\"]\", fid.read())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 22, "name": "read", "kind": "ref", "category": "function", "info": " datasets += re.findall(r\"load_dataset\\(\\\\['\\\"](\\w+)\\\\['\\\"]\", fid.read())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 26, "name": "load_dataset", "kind": "ref", 
"category": "function", "info": " load_dataset(name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/ci/check_gallery.py", "rel_fname": "ci/check_gallery.py", "line": 12, "name": "read", "kind": "ref", "category": "function", "info": " exec(fid.read())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 18, "name": "abspath", "kind": "ref", "category": "function", "info": "sys.path.insert(0, os.path.abspath('sphinxext'))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 125, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(path):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 21, "name": "execfile", "kind": "def", "category": "function", "info": "def execfile(filename, globals=None, locals=None):\n with open(filename, \"rb\") as fp:\n exec(compile(fp.read(), filename, 'exec'), globals, locals)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 127, "name": "create_thumbnail", "kind": "def", "category": "function", "info": "def create_thumbnail(infile, thumbfile,\n width=275, height=275,\n cx=0.5, cy=0.5, border=4):\n baseout, extout = op.splitext(thumbfile)\n\n im = matplotlib.image.imread(infile)\n rows, cols = im.shape[:2]\n x0 = int(cx * cols - .5 * width)\n y0 = int(cy * rows - .5 * height)\n xslice = slice(x0, x0 + width)\n yslice = slice(y0, y0 + height)\n thumb = im[yslice, xslice]\n thumb[:border, :, :3] = thumb[-border:, :, :3] = 0\n thumb[:, :border, :3] = thumb[:, -border:, :3] = 0\n\n dpi = 100\n fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)\n\n ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n frameon=False, xticks=[], yticks=[])\n if all(thumb.shape):\n ax.imshow(thumb, aspect='auto', resample=True,\n interpolation='bilinear')\n else:\n warnings.warn(\n f\"Bad thumbnail crop. 
{thumbfile} will be empty.\"\n )\n fig.savefig(thumbfile, dpi=dpi)\n return fig\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 145, "name": "add_axes", "kind": "ref", "category": "function", "info": " ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 158, "name": "indent", "kind": "def", "category": "function", "info": "def indent(s, N=4):\n \"\"\"indent a string\"\"\"\n return s.replace('\\n', '\\n' + N * ' ')\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 163, "name": "ExampleGenerator", "kind": "def", "category": "class", "info": "__init__\tdirname\tfname\tmodulename\tpyfilename\trstfilename\thtmlfilename\tpngfilename\tthumbfilename\tsphinxtag\tpagetitle\tplotfunc\tcomponents\textract_docstring\texec_file\ttoctree_entry\tcontents_entry"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 169, "name": "extract_docstring", "kind": "ref", "category": "function", "info": " self.extract_docstring()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 179, "name": "exec_file", "kind": "ref", "category": "function", "info": " self.exec_file()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 188, "name": "fname", "kind": "def", "category": "function", "info": " def fname(self):\n return op.split(self.filename)[1]\n\n @property\n def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = 
tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 192, "name": "modulename", "kind": "def", "category": "function", "info": " def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, 
tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 196, "name": "pyfilename", "kind": "def", "category": "function", "info": " def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n 
continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 200, "name": "rstfilename", "kind": "def", "category": "function", "info": " def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in 
docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 204, "name": "htmlfilename", "kind": "def", "category": "function", "info": " def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), 
float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 208, "name": "pngfilename", "kind": "def", "category": "function", "info": " def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n 
my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 213, "name": "thumbfilename", "kind": "def", "category": "function", "info": " def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" 
./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 218, "name": "sphinxtag", "kind": "def", "category": "function", "info": " def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 222, "name": "pagetitle", "kind": "def", "category": "function", "info": " def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 226, "name": "plotfunc", "kind": "def", "category": "function", "info": " def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 239, "name": "components", "kind": "def", "category": "function", "info": " def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 251, "name": "extract_docstring", "kind": "def", "category": "function", "info": " def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 254, "name": "readlines", "kind": "ref", "category": "function", "info": " lines = open(self.filename).readlines()\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 294, "name": "exec_file", "kind": "def", "category": "function", "info": " def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 300, "name": "execfile", "kind": "ref", "category": "function", "info": " execfile(self.filename, my_globals)\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 310, "name": "create_thumbnail", "kind": "ref", "category": "function", "info": " create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 312, "name": "toctree_entry", "kind": "def", "category": "function", "info": " def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 315, "name": "contents_entry", "kind": "def", "category": "function", "info": " def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 362, "name": "ExampleGenerator", "kind": "ref", "category": "function", "info": " ex = ExampleGenerator(filename, target_dir)\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 377, "name": "toctree_entry", "kind": "ref", "category": "function", "info": " toctree += ex.toctree_entry()\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 378, "name": "contents_entry", "kind": "ref", "category": "function", "info": " contents += ex.contents_entry()\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 391, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect('builder-inited', main)\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 48, "name": "main", "kind": "def", "category": "function", "info": "def main(app):\n\n content_yaml = Path(app.builder.srcdir) / \"tutorial.yaml\"\n tutorial_rst = Path(app.builder.srcdir) / \"tutorial.rst\"\n\n tutorial_dir = Path(app.builder.srcdir) / \"tutorial\"\n tutorial_dir.mkdir(exist_ok=True)\n\n with open(content_yaml) as fid:\n sections = yaml.load(fid, yaml.BaseLoader)\n\n for section in sections:\n title = section[\"title\"]\n section[\"header\"] = \"\\n\".join([title, \"-\" * len(title)]) if title else \"\"\n\n env = Environment().from_string(TEMPLATE)\n content = env.render(sections=sections)\n\n with 
open(tutorial_rst, \"w\") as fid:\n fid.write(content)\n\n for section in sections:\n for page in section[\"pages\"]:\n if (\n not (svg_path := tutorial_dir / f\"{page}.svg\").exists()\n or svg_path.stat().st_mtime < Path(__file__).stat().st_mtime\n ):\n write_thumbnail(svg_path, page)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 64, "name": "render", "kind": "ref", "category": "function", "info": " content = env.render(sections=sections)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 67, "name": "write", "kind": "ref", "category": "function", "info": " fid.write(content)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 75, "name": "write_thumbnail", "kind": "ref", "category": "function", "info": " write_thumbnail(svg_path, page)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 78, "name": "write_thumbnail", "kind": "def", "category": "function", "info": "def write_thumbnail(svg_path, page):\n\n with (\n sns.axes_style(\"dark\"),\n sns.plotting_context(\"notebook\"),\n sns.color_palette(\"deep\")\n ):\n fig = globals()[page]()\n for ax in fig.axes:\n ax.set(xticklabels=[], yticklabels=[], xlabel=\"\", ylabel=\"\", title=\"\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fig.tight_layout()\n fig.savefig(svg_path, format=\"svg\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 81, "name": "axes_style", "kind": "ref", "category": "function", "info": " sns.axes_style(\"dark\"),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 82, "name": "plotting_context", "kind": "ref", "category": "function", "info": " sns.plotting_context(\"notebook\"),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 83, "name": "color_palette", "kind": "ref", "category": "function", "info": " sns.color_palette(\"deep\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 90, "name": "tight_layout", "kind": "ref", "category": "function", "info": " fig.tight_layout()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 91, "name": "savefig", "kind": "ref", "category": "function", "info": " fig.savefig(svg_path, format=\"svg\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 94, "name": "introduction", "kind": "def", "category": "function", "info": "def introduction():\n\n tips = sns.load_dataset(\"tips\")\n fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n penguins = sns.load_dataset(\"penguins\")\n\n f = 
mpl.figure.Figure(figsize=(5, 5))\n with sns.axes_style(\"whitegrid\"):\n f.subplots(2, 2)\n\n sns.scatterplot(\n tips, x=\"total_bill\", y=\"tip\", hue=\"sex\", size=\"size\",\n alpha=.75, palette=[\"C0\", \".5\"], legend=False, ax=f.axes[0],\n )\n sns.kdeplot(\n tips.query(\"size != 5\"), x=\"total_bill\", hue=\"size\",\n palette=\"blend:C0,.5\", fill=True, linewidth=.5,\n legend=False, common_norm=False, ax=f.axes[1],\n )\n sns.lineplot(\n fmri, x=\"timepoint\", y=\"signal\", hue=\"event\",\n errorbar=(\"se\", 2), legend=False, palette=[\"C0\", \".5\"], ax=f.axes[2],\n )\n sns.boxplot(\n penguins, x=\"bill_depth_mm\", y=\"species\", hue=\"sex\",\n whiskerprops=dict(linewidth=1.5), medianprops=dict(linewidth=1.5),\n boxprops=dict(linewidth=1.5), capprops=dict(linewidth=0),\n width=.5, palette=[\"C0\", \".8\"], whis=5, ax=f.axes[3],\n )\n f.axes[3].legend_ = None\n for ax in f.axes:\n ax.set(xticks=[], yticks=[])\n return f\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 96, "name": "load_dataset", "kind": "ref", "category": "function", "info": " tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 97, "name": "load_dataset", "kind": "ref", "category": "function", "info": " fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 97, "name": "query", "kind": "ref", "category": "function", "info": " fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 98, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 100, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 101, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 102, "name": "subplots", "kind": "ref", "category": "function", "info": " f.subplots(2, 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 104, "name": "scatterplot", "kind": "ref", "category": "function", "info": " sns.scatterplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 108, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", 
"rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 109, "name": "query", "kind": "ref", "category": "function", "info": " tips.query(\"size != 5\"), x=\"total_bill\", hue=\"size\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 113, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 117, "name": "boxplot", "kind": "ref", "category": "function", "info": " sns.boxplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 129, "name": "function_overview", "kind": "def", "category": "function", "info": "def function_overview():\n\n from matplotlib.patches import FancyBboxPatch\n\n f = mpl.figure.Figure(figsize=(7, 5))\n with sns.axes_style(\"white\"):\n ax = f.subplots()\n f.subplots_adjust(0, 0, 1, 1)\n ax.set_axis_off()\n ax.set(xlim=(0, 1), ylim=(0, 1))\n\n deep = sns.color_palette(\"deep\")\n colors = dict(relational=deep[0], distributions=deep[1], categorical=deep[2])\n dark = sns.color_palette(\"dark\")\n text_colors = dict(relational=dark[0], distributions=dark[1], categorical=dark[2])\n\n functions = dict(\n relational=[\"scatterplot\", \"lineplot\"],\n distributions=[\"histplot\", \"kdeplot\", \"ecdfplot\", \"rugplot\"],\n categorical=[\n \"stripplot\", \"swarmplot\", \"boxplot\", \"violinplot\", \"pointplot\", \"barplot\"\n ],\n )\n pad, w, h = .06, .2, .15\n xs, y = np.arange(0, 1, 1 / 3) + pad * 1.05, .7\n for x, mod in zip(xs, functions):\n color = colors[mod] + (.2,)\n text_color = text_colors[mod]\n ax.add_artist(FancyBboxPatch((x, y), w, h, f\"round,pad={pad}\", color=\"white\"))\n ax.add_artist(FancyBboxPatch(\n (x, y), w, h, f\"round,pad={pad}\",\n linewidth=1, edgecolor=text_color, facecolor=color,\n ))\n ax.text(\n x + w / 2, y + h / 2, f\"{mod[:3]}plot\\n({mod})\",\n ha=\"center\", va=\"center\", size=20, color=text_color\n )\n for i, func in enumerate(functions[mod]):\n x_i, y_i = x + w / 2, y - i * .1 - h / 2 - pad\n xy = x_i - w / 2, y_i - pad / 3\n ax.add_artist(\n FancyBboxPatch(xy, w, h / 4, f\"round,pad={pad / 3}\", color=\"white\")\n )\n ax.add_artist(FancyBboxPatch(\n xy, w, h / 4, f\"round,pad={pad / 3}\",\n linewidth=1, edgecolor=text_color, facecolor=color\n ))\n ax.text(x_i, y_i, func, ha=\"center\", va=\"center\", size=16, color=text_color)\n ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)\n return f\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 133, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(7, 5))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 134, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 135, "name": "subplots", "kind": "ref", "category": "function", "info": " ax = f.subplots()\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 136, "name": "subplots_adjust", "kind": "ref", "category": "function", "info": " f.subplots_adjust(0, 0, 1, 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 137, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 140, "name": "color_palette", "kind": "ref", "category": "function", "info": " deep = sns.color_palette(\"deep\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 142, "name": "color_palette", "kind": "ref", "category": "function", "info": " dark = sns.color_palette(\"dark\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 157, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch((x, y), w, h, f\"round,pad={pad}\", color=\"white\"))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 158, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 162, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 169, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 172, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 176, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(x_i, y_i, func, ha=\"center\", va=\"center\", size=16, color=text_color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 177, "name": "plot", "kind": "ref", "category": "function", "info": " ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 181, "name": "data_structure", "kind": "def", "category": "function", "info": "def data_structure():\n\n f = mpl.figure.Figure(figsize=(7, 5))\n gs = mpl.gridspec.GridSpec(\n figure=f, ncols=6, nrows=2, height_ratios=(1, 20),\n left=0, right=.35, bottom=0, top=.9, wspace=.1, hspace=.01\n )\n colors = [c + (.5,) 
for c in sns.color_palette(\"deep\")]\n f.add_subplot(gs[0, :], facecolor=\".8\")\n for i in range(gs.ncols):\n f.add_subplot(gs[1:, i], facecolor=colors[i])\n\n gs = mpl.gridspec.GridSpec(\n figure=f, ncols=2, nrows=2, height_ratios=(1, 8), width_ratios=(1, 11),\n left=.4, right=1, bottom=.2, top=.8, wspace=.015, hspace=.02\n )\n f.add_subplot(gs[0, 1:], facecolor=colors[2])\n f.add_subplot(gs[1:, 0], facecolor=colors[1])\n f.add_subplot(gs[1, 1], facecolor=colors[0])\n return f\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 183, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(7, 5))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 184, "name": "GridSpec", "kind": "ref", "category": "function", "info": " gs = mpl.gridspec.GridSpec(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 188, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = [c + (.5,) for c in sns.color_palette(\"deep\")]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 189, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[0, :], facecolor=\".8\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 191, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1:, i], facecolor=colors[i])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 193, "name": "GridSpec", "kind": "ref", "category": "function", "info": " gs = mpl.gridspec.GridSpec(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 197, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[0, 1:], facecolor=colors[2])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 198, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1:, 0], facecolor=colors[1])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 199, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1, 1], facecolor=colors[0])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 203, "name": "error_bars", "kind": "def", "category": "function", "info": "def error_bars():\n\n diamonds = sns.load_dataset(\"diamonds\")\n with sns.axes_style(\"whitegrid\"):\n g = sns.catplot(\n diamonds, x=\"carat\", y=\"clarity\", hue=\"clarity\", kind=\"point\",\n errorbar=(\"sd\", .5), join=False, legend=False, facet_kws={\"despine\": 
False},\n palette=\"ch:s=-.2,r=-.2,d=.4,l=.6_r\", scale=.75, capsize=.3,\n )\n g.ax.yaxis.set_inverted(False)\n return g.figure\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 205, "name": "load_dataset", "kind": "ref", "category": "function", "info": " diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 206, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 207, "name": "catplot", "kind": "ref", "category": "function", "info": " g = sns.catplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 212, "name": "set_inverted", "kind": "ref", "category": "function", "info": " g.ax.yaxis.set_inverted(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 218, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 223, "name": "Plot", "kind": "ref", "category": "function", "info": " p = so.Plot(x, y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 226, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps), color=map(str, x)),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 227, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\", pointsize=ps), alpha=x),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 228, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".9\", pointsize=ps, edgewidth=2), edgecolor=x),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 229, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\"), pointsize=x).scale(pointsize=(4, 18)),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 229, "name": "scale", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\"), pointsize=x).scale(pointsize=(4, 18)),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 230, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".9\", edgecolor=\".2\"), edgewidth=x),\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 231, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".3\"), marker=map(str, x)),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 232, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".3\", marker=\"x\"), stroke=x),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 235, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 236, "name": "subplots", "kind": "ref", "category": "function", "info": " axs = f.subplots(len(plots))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 238, "name": "on", "kind": "ref", "category": "function", "info": " p.on(ax).plot()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 238, "name": "plot", "kind": "ref", "category": "function", "info": " p.on(ax).plot()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 240, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=ax, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 245, "name": "objects_interface", "kind": "def", "category": "function", "info": "def objects_interface():\n\n f = mpl.figure.Figure(figsize=(5, 4))\n C = sns.color_palette(\"deep\")\n ax = f.subplots()\n fontsize = 22\n rects = [((.135, .50), .69), ((.275, .38), .26), ((.59, .38), .40)]\n for i, (xy, w) in enumerate(rects):\n ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n ax.text(0, .52, \"Plot(data, 'x', 'y', color='var1')\", size=fontsize, color=\".2\")\n ax.text(0, .40, \".add(Dot(alpha=.5), marker='var2')\", size=fontsize, color=\".2\")\n annots = [\n (\"Mapped\\nin all layers\", (.48, .62), (0, 55)),\n (\"Set directly\", (.41, .35), (0, -55)),\n (\"Mapped\\nin this layer\", (.80, .35), (0, -55)),\n ]\n for i, (text, xy, xytext) in enumerate(annots):\n ax.annotate(\n text, xy, xytext,\n textcoords=\"offset points\", fontsize=18, ha=\"center\", va=\"center\",\n arrowprops=dict(arrowstyle=\"->\", linewidth=1.5, color=C[i]), color=C[i],\n )\n ax.set_axis_off()\n f.subplots_adjust(0, 0, 1, 1)\n\n return f\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 247, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 4))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": 
"doc/sphinxext/tutorial_builder.py", "line": 248, "name": "color_palette", "kind": "ref", "category": "function", "info": " C = sns.color_palette(\"deep\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 249, "name": "subplots", "kind": "ref", "category": "function", "info": " ax = f.subplots()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 253, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 253, "name": "Rectangle", "kind": "ref", "category": "function", "info": " ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 254, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(0, .52, \"Plot(data, 'x', 'y', color='var1')\", size=fontsize, color=\".2\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 255, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(0, .40, \".add(Dot(alpha=.5), marker='var2')\", size=fontsize, color=\".2\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 262, "name": "annotate", "kind": "ref", "category": "function", "info": " ax.annotate(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 267, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 268, "name": "subplots_adjust", "kind": "ref", "category": "function", "info": " f.subplots_adjust(0, 0, 1, 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 273, "name": "relational", "kind": "def", "category": "function", "info": "def relational():\n\n mpg = sns.load_dataset(\"mpg\")\n with sns.axes_style(\"ticks\"):\n g = sns.relplot(\n data=mpg, x=\"horsepower\", y=\"mpg\", size=\"displacement\", hue=\"weight\",\n sizes=(50, 500), hue_norm=(2000, 4500), alpha=.75, legend=False,\n palette=\"ch:start=-.5,rot=.7,dark=.3,light=.7_r\",\n )\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 275, "name": "load_dataset", "kind": "ref", "category": "function", "info": " mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 276, "name": 
"axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 277, "name": "relplot", "kind": "ref", "category": "function", "info": " g = sns.relplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 282, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 286, "name": "distributions", "kind": "def", "category": "function", "info": "def distributions():\n\n penguins = sns.load_dataset(\"penguins\").dropna()\n with sns.axes_style(\"white\"):\n g = sns.displot(\n penguins, x=\"flipper_length_mm\", row=\"island\",\n binwidth=4, kde=True, line_kws=dict(linewidth=2), legend=False,\n )\n sns.despine(left=True)\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 288, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 288, "name": "dropna", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 289, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 290, "name": "displot", "kind": "ref", "category": "function", "info": " g = sns.displot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 294, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 295, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 299, "name": "categorical", "kind": "def", "category": "function", "info": "def categorical():\n\n penguins = sns.load_dataset(\"penguins\").dropna()\n with sns.axes_style(\"whitegrid\"):\n g = sns.catplot(\n penguins, x=\"sex\", y=\"body_mass_g\", hue=\"island\", col=\"sex\",\n kind=\"box\", whis=np.inf, legend=False, sharex=False,\n )\n sns.despine(left=True)\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", 
"rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 301, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 301, "name": "dropna", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 302, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 303, "name": "catplot", "kind": "ref", "category": "function", "info": " g = sns.catplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 307, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 308, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 312, "name": "regression", "kind": "def", "category": "function", "info": "def regression():\n\n anscombe = sns.load_dataset(\"anscombe\")\n with sns.axes_style(\"white\"):\n g = sns.lmplot(\n anscombe, x=\"x\", y=\"y\", hue=\"dataset\", col=\"dataset\", col_wrap=2,\n scatter_kws=dict(edgecolor=\".2\", facecolor=\".7\", s=80),\n line_kws=dict(lw=4), ci=None,\n )\n g.set(xlim=(2, None), ylim=(2, None))\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 314, "name": "load_dataset", "kind": "ref", "category": "function", "info": " anscombe = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 315, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 316, "name": "lmplot", "kind": "ref", "category": "function", "info": " g = sns.lmplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 322, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 326, "name": "axis_grids", "kind": "def", "category": "function", "info": "def axis_grids():\n\n penguins = 
sns.load_dataset(\"penguins\").sample(200, random_state=0)\n with sns.axes_style(\"ticks\"):\n g = sns.pairplot(\n penguins.drop(\"flipper_length_mm\", axis=1),\n diag_kind=\"kde\", diag_kws=dict(fill=False),\n plot_kws=dict(s=40, fc=\"none\", ec=\"C0\", alpha=.75, linewidth=.75),\n )\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 328, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").sample(200, random_state=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 328, "name": "sample", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").sample(200, random_state=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 329, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 330, "name": "pairplot", "kind": "ref", "category": "function", "info": " g = sns.pairplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 331, "name": "drop", "kind": "ref", "category": "function", "info": " penguins.drop(\"flipper_length_mm\", axis=1),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 335, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 339, "name": "aesthetics", "kind": "def", "category": "function", "info": "def aesthetics():\n\n f = mpl.figure.Figure(figsize=(5, 5))\n for i, style in enumerate([\"darkgrid\", \"white\", \"ticks\", \"whitegrid\"], 1):\n with sns.axes_style(style):\n ax = f.add_subplot(2, 2, i)\n ax.set(xticks=[0, .25, .5, .75, 1], yticks=[0, .25, .5, .75, 1])\n sns.despine(ax=f.axes[1])\n sns.despine(ax=f.axes[2])\n return f\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 341, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 343, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(style):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 344, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax = f.add_subplot(2, 2, i)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 346, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=f.axes[1])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 347, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=f.axes[2])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 351, "name": "color_palettes", "kind": "def", "category": "function", "info": "def color_palettes():\n\n f = mpl.figure.Figure(figsize=(5, 5))\n palettes = [\"deep\", \"husl\", \"gray\", \"ch:\", \"mako\", \"vlag\", \"icefire\"]\n axs = f.subplots(len(palettes))\n x = np.arange(10)\n for ax, name in zip(axs, palettes):\n cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n ax.pcolormesh(x[None, :], linewidth=.5, edgecolor=\"w\", alpha=.8, cmap=cmap)\n ax.set_axis_off()\n return f\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 353, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 355, "name": "subplots", "kind": "ref", "category": "function", "info": " axs = f.subplots(len(palettes))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 358, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 358, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 359, "name": "pcolormesh", "kind": "ref", "category": "function", "info": " ax.pcolormesh(x[None, :], linewidth=.5, edgecolor=\"w\", alpha=.8, cmap=cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 360, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 364, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect(\"builder-inited\", main)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 365, "name": "connect", "kind": "ref", "category": "function", "info": " app.connect(\"builder-inited\", main)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 9, "name": "line_type", "kind": "def", "category": "function", "info": "def line_type(line):\n\n if line.startswith(\" \"):\n return \"code\"\n else:\n return \"markdown\"\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 17, "name": "add_cell", "kind": "def", "category": "function", "info": "def add_cell(nb, lines, cell_type):\n\n cell_objs = {\n \"code\": nbformat.v4.new_code_cell,\n \"markdown\": nbformat.v4.new_markdown_cell,\n }\n text = \"\\n\".join(lines)\n cell = cell_objs[cell_type](text)\n nb[\"cells\"].append(cell)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 36, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " lines = NumpyDocString(pydoc.getdoc(obj))[\"Examples\"]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 41, "name": "new_notebook", "kind": "ref", "category": "function", "info": " nb = nbformat.v4.new_notebook()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 57, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) != cell_type:\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 60, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 61, "name": "line_type", "kind": "ref", "category": "function", "info": " cell_type = line_type(line)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 64, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) == \"code\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 70, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 72, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f\"docstrings/{name}.ipynb\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 14, "name": "poisson_disc_sample", "kind": "def", "category": "function", "info": "def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):\n \"\"\"Find positions using poisson-disc sampling.\"\"\"\n # See http://bost.ocks.org/mike/algorithms/\n rng = np.random.default_rng(seed)\n uniform = rng.uniform\n randint = rng.integers\n\n # Cache the results\n key = array_radius, pad_radius, seed\n if key in XY_CACHE:\n return 
XY_CACHE[key]\n\n # Start at a fixed point we know will work\n start = np.zeros(d)\n samples = [start]\n queue = [start]\n\n while queue:\n\n # Pick a sample to expand from\n s_idx = randint(len(queue))\n s = queue[s_idx]\n\n for i in range(candidates):\n # Generate a candidate from this sample\n coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n\n # Check the three conditions to accept the candidate\n in_array = np.sqrt(np.sum(coords ** 2)) < array_radius\n in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)\n\n if in_array and in_ring:\n # Accept the candidate\n samples.append(coords)\n queue.append(coords)\n break\n\n if (i + 1) == candidates:\n # We've exhausted the particular sample\n queue.pop(s_idx)\n\n samples = np.array(samples)\n XY_CACHE[key] = samples\n return samples\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 17, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 34, "name": "randint", "kind": "ref", "category": "function", "info": " s_idx = randint(len(queue))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 39, "name": "uniform", "kind": "ref", "category": "function", "info": " coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 60, "name": "logo", "kind": "def", "category": "function", "info": "def logo(\n ax,\n color_kws, ring, ring_idx, edge,\n pdf_means, pdf_sigma, dy, y0, w, h,\n hist_mean, hist_sigma, hist_y0, lw, skip,\n scatter, pad, scale,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 70, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 71, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect('equal')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 79, "name": "gaussian", "kind": "ref", "category": "function", "info": " y = gaussian(x.size, pdf_sigma)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 97, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 104, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(bg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 115, "name": "add_artist", "kind": "ref", "category": "function", "info": " 
ax.add_artist(wedge)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 120, "name": "gaussian", "kind": "ref", "category": "function", "info": " hist_y = gaussian(x.size, hist_sigma)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 133, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " bar.set_clip_path(fg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 138, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " u.set_clip_path(fg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 143, "name": "poisson_disc_sample", "kind": "ref", "category": "function", "info": " xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 153, "name": "get_paths", "kind": "ref", "category": "function", "info": " path = u.get_paths()[0]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "get_transform", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 155, "name": "set_visible", "kind": "ref", "category": "function", "info": " u.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 182, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " color = sns.cubehelix_palette(**kwargs[\"color_kws\"])[color_idx]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 187, "name": "logo", "kind": "ref", "category": "function", "info": " logo(ax, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 194, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 204, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 212, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 222, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 40, "name": "MetadataError", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 44, "name": "pop_recursive", "kind": "def", "category": "function", "info": "def pop_recursive(d, key, default=None):\n \"\"\"dict.pop(key) where `key` is a `.`-delimited list of nested keys.\n >>> d = {'a': {'b': 1, 'c': 2}}\n >>> pop_recursive(d, 'a.c')\n 2\n >>> d\n {'a': {'b': 1}}\n \"\"\"\n nested = key.split('.')\n current = d\n for k in nested[:-1]:\n if hasattr(current, 'get'):\n current = current.get(k, {})\n else:\n return default\n if not hasattr(current, 'pop'):\n return default\n return current.pop(nested[-1], default)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 64, "name": "strip_output", "kind": "def", "category": "function", "info": "def strip_output(nb):\n \"\"\"\n Strip the outputs, execution count/prompt number and miscellaneous\n metadata from a notebook object, unless specified to keep either the\n outputs or counts.\n \"\"\"\n keys = {'metadata': [], 'cell': {'metadata': [\"execution\"]}}\n\n nb.metadata.pop('signature', None)\n nb.metadata.pop('widgets', None)\n\n for field in keys['metadata']:\n pop_recursive(nb.metadata, field)\n\n if 'NB_KERNEL' in os.environ:\n nb.metadata['kernelspec']['name'] = os.environ['NB_KERNEL']\n nb.metadata['kernelspec']['display_name'] = os.environ['NB_KERNEL']\n\n for cell in nb.cells:\n\n if 'outputs' in cell:\n cell['outputs'] = []\n if 'prompt_number' in cell:\n cell['prompt_number'] = None\n if 'execution_count' in cell:\n cell['execution_count'] = None\n\n # Always remove this metadata\n for output_style in ['collapsed', 'scrolled']:\n if output_style in cell.metadata:\n cell.metadata[output_style] = False\n if 'metadata' in cell:\n for field in ['collapsed', 'scrolled', 'ExecuteTime']:\n cell.metadata.pop(field, None)\n for (extra, fields) in keys['cell'].items():\n if extra in cell:\n for field in fields:\n pop_recursive(getattr(cell, extra), field)\n return nb\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 76, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(nb.metadata, field)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 101, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(getattr(cell, extra), field)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 120, "name": "ExecutePreprocessor", "kind": "ref", "category": "function", "info": " ep = ExecutePreprocessor(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 125, "name": "preprocess", "kind": "ref", "category": "function", 
"info": " ep.preprocess(nb, {\"metadata\": {\"path\": basedir}})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 142, "name": "RSTExporter", "kind": "ref", "category": "function", "info": " exp = RSTExporter()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 151, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 151, "name": "TagRemovePreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "ExtractOutputPreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 154, "name": "from_notebook_node", "kind": "ref", "category": "function", "info": " body, resources = exp.from_notebook_node(nb)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 157, "name": "strip_output", "kind": "ref", "category": "function", "info": " nb = strip_output(nb)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 168, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(imdir):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 15, "name": "read", "kind": "ref", "category": "function", "info": " nb = nbformat.read(f, as_version=4)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 21, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "sns.lmplot(\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 12, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f, left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 19, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=\"carat\", y=\"price\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "fmri = sns.load_dataset(\"fmri\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 14, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(x=\"timepoint\", y=\"signal\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 10, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "dots = sns.load_dataset(\"dots\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 13, "name": "color_palette", "kind": "ref", "category": "function", "info": "palette = 
sns.color_palette(\"rocket_r\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 16, "name": "relplot", "kind": "ref", "category": "function", "info": "sns.relplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 12, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 18, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Body mass (g)\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 19, "name": "set_title", "kind": "ref", "category": "function", "info": "g.legend.set_title(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\", palette=\"pastel\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 14, "name": "boxplot", "kind": "ref", "category": "function", "info": "sns.boxplot(x=\"day\", y=\"total_bill\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(offset=10, trim=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_violinplots.py", "rel_fname": 
"examples/grouped_violinplots.py", "line": 13, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=tips, x=\"day\", y=\"total_bill\", hue=\"smoker\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": " .get_level_values(\"network\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "corr", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "stack", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 23, "name": "reset_index", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 26, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 35, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 36, "name": "margins", "kind": "ref", "category": "function", "info": "g.ax.margins(.02)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 37, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": "for label in 
g.ax.get_xticklabels():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 38, "name": "set_rotation", "kind": "ref", "category": "function", "info": " label.set_rotation(90)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 40, "name": "set_edgecolor", "kind": "ref", "category": "function", "info": " artist.set_edgecolor(\".7\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(11)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 11, "name": "gamma", "kind": "ref", "category": "function", "info": "x = rs.gamma(2, size=1000)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = -.5 * x + rs.normal(size=1000)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 14, "name": "jointplot", "kind": "ref", "category": "function", "info": "sns.jointplot(x=x, y=y, kind=\"hex\", color=\"#4CB391\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 18, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 27, "name": "set_major_formatter", "kind": "ref", "category": "function", "info": "ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 28, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks([500, 1000, 2000, 5000, 10000])\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 13, "name": "set_xscale", "kind": "ref", "category": "function", "info": "ax.set_xscale(\"log\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 16, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 23, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(x=\"distance\", y=\"method\", data=planets,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(trim=True, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 20, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 28, "name": "pointplot", "kind": "ref", "category": "function", "info": "sns.pointplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 35, "name": "move_legend", "kind": "ref", "category": "function", "info": "sns.move_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 12, 
"name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=planets, x=\"year\", y=\"distance\", marginal_ticks=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 18, "name": "add_axes", "kind": "ref", "category": "function", "info": "cax = g.figure.add_axes([.15, .55, .02, .2])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 21, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 25, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, element=\"step\", color=\"#03012d\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 13, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", rc={\"axes.facecolor\": (0, 0, 0, 0)})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 13, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(1979)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 14, "name": "randn", "kind": "ref", "category": "function", "info": "x = rs.randn(500)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 21, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "pal = sns.cubehelix_palette(10, rot=-.25, light=.7)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 22, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, row=\"g\", hue=\"g\", aspect=15, height=.5, palette=pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 31, "name": "refline", "kind": "ref", "category": "function", "info": "g.refline(y=0, linewidth=2, linestyle=\"-\", color=None, clip_on=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 35, "name": "label", "kind": "def", 
"category": "function", "info": "def label(x, color, label):\n ax = plt.gca()\n ax.text(0, .2, label, fontweight=\"bold\", color=color,\n ha=\"left\", va=\"center\", transform=ax.transAxes)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 47, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 49, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(bottom=True, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 11, "name": "boxenplot", "kind": "ref", "category": "function", "info": "sns.boxenplot(x=\"clarity\", y=\"carat\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rng = np.random.RandomState(0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 16, "name": "multivariate_normal", "kind": "ref", "category": "function", "info": "x, y = rng.multivariate_normal(mean, cov, n).T\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 20, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=x, y=y, s=5, color=\".15\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 21, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(x=x, y=y, bins=50, pthresh=.1, cmap=\"mako\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 22, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(x=x, y=y, levels=5, color=\"w\", linewidths=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 16, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(x=\"age\", y=\"survived\", col=\"sex\", hue=\"sex\", data=df,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(4)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 16, "name": "randint", "kind": "ref", "category": "function", "info": "pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 24, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "grid = sns.FacetGrid(df, col=\"walk\", hue=\"walk\", palette=\"tab20c\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 28, "name": "refline", "kind": "ref", "category": "function", "info": "grid.refline(y=0, linestyle=\":\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(33)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 16, "name": "normal", "kind": "ref", "category": "function", "info": "d = pd.DataFrame(data=rs.normal(size=(100, 26)),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 20, "name": "corr", "kind": "ref", "category": "function", "info": "corr = d.corr()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 29, "name": "diverging_palette", "kind": "ref", "category": "function", "info": "cmap = sns.diverging_palette(230, 20, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 32, "name": "heatmap", "kind": "ref", "category": "function", "info": 
"sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", color_codes=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=mpg, x=\"mpg\", y=\"acceleration\", space=0, ratio=17)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.scatterplot, size=mpg[\"horsepower\"], sizes=(30, 120),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 14, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.rugplot, height=1, color=\"g\", alpha=.6)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 14, "name": "set_aspect", "kind": "ref", "category": "function", "info": "ax.set_aspect(\"equal\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 17, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 18, "name": "query", "kind": "ref", "category": "function", "info": " data=iris.query(\"species != 'versicolor'\"),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_conditional_kde.py", "rel_fname": 
"examples/multiple_conditional_kde.py", "line": 13, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 11, "name": "blend_palette", "kind": "ref", "category": "function", "info": "cmap = sns.blend_palette(colors, input=\"husl\", as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 12, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 20, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"Snoot length (mm)\", \"Snoot depth (mm)\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 11, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(df, diag_sharey=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 12, "name": "map_upper", "kind": "ref", "category": "function", "info": "g.map_upper(sns.scatterplot, s=15)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 13, "name": "map_lower", "kind": "ref", "category": "function", "info": 
"g.map_lower(sns.kdeplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 14, "name": "map_diag", "kind": "ref", "category": "function", "info": "g.map_diag(sns.kdeplot, lw=2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "titanic = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 12, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(titanic, y_vars=\"survived\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 19, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(fig=g.fig, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 34, "name": "grid", "kind": "ref", "category": "function", "info": " ax.xaxis.grid(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 35, "name": "grid", "kind": "ref", "category": "function", "info": " ax.yaxis.grid(True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 37, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", context=\"talk\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 9, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(8)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y1, palette=\"rocket\", ax=ax1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 19, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax1.set_ylabel(\"Sequential\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 23, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y2, palette=\"vlag\", ax=ax2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 25, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax2.set_ylabel(\"Diverging\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 28, "name": "choice", "kind": "ref", "category": "function", "info": "y3 = rs.choice(y1, len(y1), replace=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 29, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y3, palette=\"deep\", ax=ax3)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 31, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax3.set_ylabel(\"Qualitative\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 34, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 11, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(50)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 20, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " cmap = sns.cubehelix_palette(start=s, light=1, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 23, "name": "normal", "kind": "ref", "category": "function", "info": " x, y = rs.normal(size=(2, 50))\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 24, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 31, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 16, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"pastel\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"total\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 21, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"muted\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 22, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"alcohol\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "exercise = sns.load_dataset(\"exercise\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 13, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 18, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 21, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, col=\"speed\", hue=\"speed\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 10, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(x=\"total_bill\", y=\"tip\", data=tips,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(7)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 11, "name": "normal", "kind": "ref", "category": "function", "info": "x = rs.normal(2, 1, 75)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = 2 + 1.5 * x + rs.normal(0, 2, 75)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 15, "name": "residplot", "kind": "ref", "category": "function", "info": "sns.residplot(x=x, y=y, lowess=True, color=\"g\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 14, "name": "relplot", "kind": "ref", "category": 
"function", "info": "sns.relplot(x=\"horsepower\", y=\"mpg\", hue=\"origin\", size=\"weight\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\", palette=\"muted\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 14, "name": "swarmplot", "kind": "ref", "category": "function", "info": "ax = sns.swarmplot(data=df, x=\"body_mass_g\", y=\"sex\", hue=\"species\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 10, "name": "pairplot", "kind": "ref", "category": "function", "info": "sns.pairplot(df, hue=\"species\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 13, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "cmap = sns.cubehelix_palette(rot=-.2, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 14, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 21, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.xaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 22, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.yaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/scatterplot_sizes.py", "rel_fname": 
"examples/scatterplot_sizes.py", "line": 23, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 11, "name": "default_rng", "kind": "ref", "category": "function", "info": "rs = np.random.default_rng(0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 13, "name": "normal", "kind": "ref", "category": "function", "info": "d = rs.normal(0, 2, (n, p))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 17, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=d, palette=\"light:g\", inner=\"points\", orient=\"h\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=df, x=\"body_mass_g\", y=\"bill_depth_mm\", space=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.kdeplot,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 15, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, color=\"#03051A\", alpha=1, bins=25)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights_long = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 11, "name": "pivot", "kind": "ref", "category": "function", "info": "flights = flights_long.pivot(\"month\", \"year\", \"passengers\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 15, "name": "heatmap", "kind": "ref", "category": "function", "info": "sns.heatmap(flights, annot=True, fmt=\"d\", linewidths=.5, ax=ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 10, "name": "catplot", "kind": "ref", "category": "function", "info": "sns.catplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 14, "name": "regplot", "kind": "ref", "category": "function", "info": "sns.regplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 21, "name": "husl_palette", "kind": "ref", "category": "function", "info": "network_pal = sns.husl_palette(8, s=.45)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 25, "name": "get_level_values", "kind": "ref", "category": "function", "info": "networks = df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "clustermap", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "corr", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 11, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 13, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 24, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(.8, .85, year, transform=ax.transAxes, fontweight=\"bold\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 27, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "get_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 36, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 37, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Passengers\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 38, "name": "tight_layout", "kind": "ref", "category": "function", "info": "g.tight_layout()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 12, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(365)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 13, "name": "randn", "kind": "ref", "category": "function", "info": "values = rs.randn(365, 4).cumsum(axis=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 16, "name": "rolling", "kind": "ref", "category": "function", "info": "data = data.rolling(7).mean()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 18, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(data=data, palette=\"tab10\", linewidth=2.5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "corr", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "groupby", "kind": "ref", "category": "function", "info": "corr_df = 
df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "mean", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 22, "name": "astype", "kind": "ref", "category": "function", "info": "corr_df.index = corr_df.index.astype(int)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 23, "name": "sort_index", "kind": "ref", "category": "function", "info": "corr_df = corr_df.sort_index().T\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 33, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 5, "name": "MarkerStyle", "kind": "def", "category": "function", "info": "def MarkerStyle(marker=None, fillstyle=None):\n \"\"\"\n Allow MarkerStyle to accept a MarkerStyle object as a parameter.\n\n Supports matplotlib < 3.3.0\n https://github.com/matplotlib/matplotlib/pull/16692\n\n \"\"\"\n if isinstance(marker, mpl.markers.MarkerStyle):\n if fillstyle is None:\n return marker\n else:\n marker = marker.get_marker()\n return mpl.markers.MarkerStyle(marker, fillstyle)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 17, "name": "get_marker", "kind": "ref", "category": "function", "info": " marker = marker.get_marker()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 18, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return mpl.markers.MarkerStyle(marker, fillstyle)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 21, "name": "norm_from_scale", "kind": "def", "category": "function", "info": "def norm_from_scale(scale, norm):\n \"\"\"Produce a Normalize object given a Scale and min/max domain limits.\"\"\"\n # This is an internal matplotlib function that simplifies things to access\n # It is likely to become part of the matplotlib API at some point:\n # https://github.com/matplotlib/matplotlib/issues/20329\n if isinstance(norm, mpl.colors.Normalize):\n return norm\n\n if scale is None:\n return None\n\n if norm is None:\n vmin = vmax = None\n else:\n vmin, vmax = norm # TODO more helpful error if this fails?\n\n class ScaledNorm(mpl.colors.Normalize):\n\n def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # 
***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 37, "name": "ScaledNorm", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 39, "name": "__call__", "kind": "def", "category": "function", "info": " def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # ***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 42, "name": "process_value", "kind": "ref", "category": "function", "info": " value, is_scalar = self.process_value(value)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 43, "name": "autoscale_None", "kind": "ref", "category": "function", "info": " self.autoscale_None(value)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 53, "name": "transform", "kind": "ref", "category": "function", "info": " t_value = self.transform(value).reshape(np.shape(value))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 54, "name": "transform", "kind": "ref", "category": "function", "info": " t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 60, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " t_value = np.ma.masked_invalid(t_value, copy=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 63, "name": "ScaledNorm", "kind": "ref", "category": "function", "info": " new_norm = ScaledNorm(vmin, 
vmax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 64, "name": "get_transform", "kind": "ref", "category": "function", "info": " new_norm.transform = scale.get_transform().transform\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 69, "name": "scale_factory", "kind": "def", "category": "function", "info": "def scale_factory(scale, axis, **kwargs):\n \"\"\"\n Backwards compatability for creation of independent scales.\n\n Matplotlib scales require an Axis object for instantiation on < 3.4.\n But the axis is not used, aside from extraction of the axis_name in LogScale.\n\n \"\"\"\n modify_transform = False\n if Version(mpl.__version__) < Version(\"3.4\"):\n if axis[0] in \"xy\":\n modify_transform = True\n axis = axis[0]\n base = kwargs.pop(\"base\", None)\n if base is not None:\n kwargs[f\"base{axis}\"] = base\n nonpos = kwargs.pop(\"nonpositive\", None)\n if nonpos is not None:\n kwargs[f\"nonpos{axis}\"] = nonpos\n\n if isinstance(scale, str):\n class Axis:\n axis_name = axis\n axis = Axis()\n\n scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n\n if modify_transform:\n transform = scale.get_transform()\n transform.base = kwargs.get(\"base\", 10)\n if kwargs.get(\"nonpositive\") == \"mask\":\n # Setting a private attribute, but we only get here\n # on an old matplotlib, so this won't break going forwards\n transform._clip = False\n\n return scale\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 90, "name": "Axis", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 92, "name": "Axis", "kind": "ref", "category": "function", "info": " axis = Axis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 94, "name": "scale_factory", "kind": "ref", "category": "function", "info": " scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 97, "name": "get_transform", "kind": "ref", "category": "function", "info": " transform = scale.get_transform()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 107, "name": "set_scale_obj", "kind": "def", "category": "function", "info": "def set_scale_obj(ax, axis, scale):\n \"\"\"Handle backwards compatability with setting matplotlib scale.\"\"\"\n if Version(mpl.__version__) < Version(\"3.4\"):\n # The ability to pass a BaseScale instance to Axes.set_{}scale was added\n # to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089\n # Workaround: use the 
scale name, which is restrictive only if the user\n # wants to define a custom scale; they'll need to update the registry too.\n if scale.name is None:\n # Hack to support our custom Formatter-less CatScale\n return\n method = getattr(ax, f\"set_{axis}scale\")\n kws = {}\n if scale.name == \"function\":\n trans = scale.get_transform()\n kws[\"functions\"] = (trans._forward, trans._inverse)\n method(scale.name, **kws)\n axis_obj = getattr(ax, f\"{axis}axis\")\n scale.set_default_locators_and_formatters(axis_obj)\n else:\n ax.set(**{f\"{axis}scale\": scale})\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.4\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 120, "name": "get_transform", "kind": "ref", "category": "function", "info": " trans = scale.get_transform()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 122, "name": "method", "kind": "ref", "category": "function", "info": " method(scale.name, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 124, "name": "set_default_locators_and_formatters", "kind": "ref", "category": "function", "info": " scale.set_default_locators_and_formatters(axis_obj)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 129, "name": "get_colormap", "kind": "def", "category": "function", "info": "def get_colormap(name):\n \"\"\"Handle changes to matplotlib colormap interface in 3.6.\"\"\"\n try:\n return mpl.colormaps[name]\n except AttributeError:\n return mpl.cm.get_cmap(name)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 134, "name": "get_cmap", "kind": "ref", "category": "function", "info": " return mpl.cm.get_cmap(name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 137, "name": "register_colormap", "kind": "def", "category": "function", "info": "def register_colormap(name, cmap):\n \"\"\"Handle changes to matplotlib colormap interface in 3.6.\"\"\"\n try:\n if name not in mpl.colormaps:\n mpl.colormaps.register(cmap, name=name)\n except AttributeError:\n mpl.cm.register_cmap(name, cmap)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 141, "name": "register", "kind": "ref", "category": "function", "info": " mpl.colormaps.register(cmap, name=name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 143, "name": "register_cmap", "kind": "ref", "category": "function", "info": " mpl.cm.register_cmap(name, cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", 
"rel_fname": "seaborn/_compat.py", "line": 146, "name": "set_layout_engine", "kind": "def", "category": "function", "info": "def set_layout_engine(fig, engine):\n \"\"\"Handle changes to auto layout engine interface in 3.6\"\"\"\n if hasattr(fig, \"set_layout_engine\"):\n fig.set_layout_engine(engine)\n else:\n if engine == \"tight\":\n fig.set_tight_layout(True)\n elif engine == \"constrained\":\n fig.set_constrained_layout(True)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 149, "name": "set_layout_engine", "kind": "ref", "category": "function", "info": " fig.set_layout_engine(engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 152, "name": "set_tight_layout", "kind": "ref", "category": "function", "info": " fig.set_tight_layout(True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 154, "name": "set_constrained_layout", "kind": "ref", "category": "function", "info": " fig.set_constrained_layout(True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 157, "name": "share_axis", "kind": "def", "category": "function", "info": "def share_axis(ax0, ax1, which):\n \"\"\"Handle changes to post-hoc axis sharing.\"\"\"\n if Version(mpl.__version__) < Version(\"3.5.0\"):\n group = getattr(ax0, f\"get_shared_{which}_axes\")()\n group.join(ax1, ax0)\n else:\n getattr(ax1, f\"share{which}\")(ax0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 159, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.5.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 159, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.5.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 14, "name": "PlotData", "kind": "def", "category": "class", "info": "__init__\t__contains__\tjoin\t_assign_variables"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 54, "name": "_assign_variables", "kind": "ref", "category": "function", "info": " frame, names, ids = self._assign_variables(data, variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 90, "name": "PlotData", "kind": "ref", "category": "function", "info": " new = PlotData(data, variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 118, "name": "_assign_variables", "kind": "def", "category": "function", "info": " def _assign_variables(\n self,\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataFrame, dict[str, str | None], dict[str, str | int]]:\n \"\"\"\n Assign values for plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data\n Input data where variable names map to vector values.\n variables\n Keys 
are names of plot variables (x, y, ...); each value is one of:\n\n - name of a column (or index level, or dictionary entry) in `data`\n - vector in any format that can construct a :class:`pandas.DataFrame`\n\n Returns\n -------\n frame\n Table mapping seaborn variables (x, y, color, ...) to data vectors.\n names\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n ids\n Like the `names` dict, but `None` values are replaced by the `id()`\n of the data object that defined the variable.\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in `data`, or when they are\n non-indexed vector datatypes that have a different length from `data`.\n\n \"\"\"\n source_data: Mapping | DataFrame\n frame: DataFrame\n names: dict[str, str | None]\n ids: dict[str, str | int]\n\n plot_data = {}\n names = {}\n ids = {}\n\n given_data = data is not None\n if data is not None:\n source_data = data\n else:\n # Data is optional; all variables can be defined as vectors\n # But simplify downstream code by always having a usable source data object\n source_data = {}\n\n # TODO Generally interested in accepting a generic DataFrame interface\n # Track https://data-apis.org/ for development\n\n # Variables can also be extracted from the index of a DataFrame\n if isinstance(source_data, pd.DataFrame):\n index = source_data.index.to_frame().to_dict(\"series\")\n else:\n index = {}\n\n for key, val in variables.items():\n\n # Simply ignore variables with no specification\n if val is None:\n continue\n\n # Try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow other hashables when\n # taking from the main data object. Allow only strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n\n # TODO this will be rendered unnecessary by the following pandas fix:\n # https://github.com/pandas-dev/pandas/pull/41283\n try:\n hash(val)\n val_is_hashable = True\n except TypeError:\n val_is_hashable = False\n\n val_as_data_key = (\n # See https://github.com/pandas-dev/pandas/pull/41283\n # (isinstance(val, abc.Hashable) and val in source_data)\n (val_is_hashable and val in source_data)\n or (isinstance(val, str) and val in index)\n )\n\n if val_as_data_key:\n val = cast(ColumnName, val)\n if val in source_data:\n plot_data[key] = source_data[val]\n elif val in index:\n plot_data[key] = index[val]\n names[key] = ids[key] = str(val)\n\n elif isinstance(val, str):\n\n # This looks like a column name, but the lookup failed.\n\n err = f\"Could not interpret value `{val}` for `{key}`. 
\"\n if not given_data:\n err += \"Value is a string, but `data` was not passed.\"\n else:\n err += \"An entry with this name does not appear in `data`.\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value somehow represents data\n\n # Ignore empty data structures\n if isinstance(val, Sized) and len(val) == 0:\n continue\n\n # If vector has no index, it must match length of data table\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if isinstance(val, Sized) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the original name using pandas-like metadata\n if hasattr(val, \"name\"):\n names[key] = ids[key] = str(val.name) # type: ignore # mypy/1424\n else:\n names[key] = None\n ids[key] = id(val)\n\n # Construct a tidy plot DataFrame. This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n # TODO Note: this fails when variable specs *only* have scalars!\n frame = pd.DataFrame(plot_data)\n\n return frame, names, ids\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 176, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = source_data.index.to_frame().to_dict(\"series\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/exceptions.py", "rel_fname": "seaborn/_core/exceptions.py", "line": 9, "name": "PlotSpecError", "kind": "def", "category": "class", "info": "_during"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/exceptions.py", "rel_fname": "seaborn/_core/exceptions.py", "line": 21, "name": "_during", "kind": "def", "category": "function", "info": " def _during(cls, step: str, var: str = \"\") -> PlotSpecError:\n \"\"\"\n Initialize the class to report the failure of a specific operation.\n \"\"\"\n message = []\n if var:\n message.append(f\"{step} failed for the `{var}` variable.\")\n else:\n message.append(f\"{step} failed.\")\n message.append(\"See the traceback above for more information.\")\n return cls(\" \".join(message))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/exceptions.py", "rel_fname": "seaborn/_core/exceptions.py", "line": 31, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(\" \".join(message))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 15, "name": "GroupBy", "kind": "def", "category": "class", "info": "__init__\t_get_groups\t_reorder_columns\tagg\tapply"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 48, "name": "_get_groups", "kind": "def", "category": "function", "info": " def _get_groups(\n self, data: DataFrame\n ) -> tuple[str | list[str], Index | MultiIndex]:\n \"\"\"Return index with Cartesian product of ordered grouping variable levels.\"\"\"\n levels = {}\n for var, order in self.order.items():\n if var in data:\n if order is None:\n order = categorical_order(data[var])\n levels[var] = order\n\n grouper: str | list[str]\n groups: Index | MultiIndex\n if 
not levels:\n grouper = []\n groups = pd.Index([])\n elif len(levels) > 1:\n grouper = list(levels)\n groups = pd.MultiIndex.from_product(levels.values(), names=grouper)\n else:\n grouper, = list(levels)\n groups = pd.Index(levels[grouper], name=grouper)\n return grouper, groups\n\n def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n # Implies that we had a MultiIndex so key is iterable\n group_ids = dict(zip(grouper, cast(Iterable, key)))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 56, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(data[var])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 72, "name": "_reorder_columns", "kind": "def", "category": "function", "info": " def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, 
data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n # Implies that we had a MultiIndex so key is iterable\n group_ids = dict(zip(grouper, cast(Iterable, key)))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 87, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 109, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 112, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 112, "name": "func", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 116, "name": "func", "kind": "ref", "category": "function", "info": " parts[key] = func(part_df, *args, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 128, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(res, data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 11, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 15, "name": "Move", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 27, "name": "Jitter", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 57, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(self.seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 59, "name": "jitter", "kind": "def", "category": "function", "info": " def 
jitter(data, col, scale):\n noise = rng.uniform(-.5, +.5, len(data))\n offsets = noise * scale\n return data[col] + offsets\n\n if self.width is default:\n width = 0.0 if self.x or self.y else 0.2\n else:\n width = cast(float, self.width)\n\n if self.width:\n data[orient] = jitter(data, orient, width * data[\"width\"])\n if self.x:\n data[\"x\"] = jitter(data, \"x\", self.x)\n if self.y:\n data[\"y\"] = jitter(data, \"y\", self.y)\n\n return data\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 60, "name": "uniform", "kind": "ref", "category": "function", "info": " noise = rng.uniform(-.5, +.5, len(data))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 70, "name": "jitter", "kind": "ref", "category": "function", "info": " data[orient] = jitter(data, orient, width * data[\"width\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 72, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"x\"] = jitter(data, \"x\", self.x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 74, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"y\"] = jitter(data, \"y\", self.y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 80, "name": "Dodge", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 114, "name": "groupby_pos", "kind": "def", "category": "function", "info": " def groupby_pos(s):\n grouper = [groups[v] for v in [orient, \"col\", \"row\"] if v in data]\n return s.groupby(grouper, sort=False, observed=True)\n\n def scale_widths(w):\n # TODO what value to fill missing widths??? Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 118, "name": "scale_widths", "kind": "def", "category": "function", "info": " def scale_widths(w):\n # TODO what value to fill missing widths??? 
Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 129, "name": "widths_to_offsets", "kind": "def", "category": "function", "info": " def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 132, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 133, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 153, "name": "Stack", "kind": "def", "category": "class", "info": "_stack\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 164, "name": "_stack", "kind": "def", "category": "function", "info": " def _stack(self, df, orient):\n\n # TODO should stack do something with ymin/ymax style marks?\n # Should there be an upstream conversion to baseline/height parameterization?\n\n if df[\"baseline\"].nunique() > 1:\n err = \"Stack move cannot be used when baselines are already heterogeneous\"\n raise RuntimeError(err)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n stacked_lengths = (df[other] - df[\"baseline\"]).dropna().cumsum()\n offsets = stacked_lengths.shift(1).fillna(0)\n\n df[other] = stacked_lengths\n df[\"baseline\"] = df[\"baseline\"] + offsets\n\n return df\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n # TODO where to ensure that other semantic variables are sorted properly?\n # TODO why are we not using the passed in groupby here?\n groupers = [\"col\", \"row\", orient]\n return GroupBy(groupers).apply(data, self._stack, orient)\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 189, "name": "GroupBy", "kind": "ref", "category": "function", "info": " return GroupBy(groupers).apply(data, self._stack, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 193, "name": "Shift", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 221, "name": "Norm", "kind": "def", "category": "class", "info": "_norm\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 249, "name": "_norm", "kind": "def", "category": "function", "info": " def _norm(self, df, var):\n\n if self.where is None:\n denom_data = df[var]\n else:\n denom_data = df.query(self.where)[var]\n df[var] = df[var] / denom_data.agg(self.func)\n\n if self.percent:\n df[var] = df[var] * 100\n\n return df\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return groupby.apply(data, self._norm, other)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 56, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 62, "name": "Layer", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 74, "name": "FacetSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 81, "name": "PairSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 93, "name": "theme_context", "kind": "def", "category": "function", "info": "def theme_context(params: dict[str, Any]) -> Generator:\n \"\"\"Temporarily modify specifc matplotlib rcParams.\"\"\"\n orig_params = {k: mpl.rcParams[k] for k in params}\n color_codes = \"bgrmyck\"\n nice_colors = [*color_palette(\"deep6\"), (.15, .15, .15)]\n orig_colors = [mpl.colors.colorConverter.colors[x] for x in color_codes]\n # TODO how to allow this to reflect the color cycle when relevant?\n try:\n mpl.rcParams.update(params)\n for (code, color) in zip(color_codes, nice_colors):\n mpl.colors.colorConverter.colors[code] = color\n mpl.colors.colorConverter.cache[code] = color\n yield\n finally:\n mpl.rcParams.update(orig_params)\n for (code, color) in zip(color_codes, orig_colors):\n mpl.colors.colorConverter.colors[code] = color\n mpl.colors.colorConverter.cache[code] = color\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 97, "name": "color_palette", "kind": "ref", "category": "function", "info": " nice_colors = [*color_palette(\"deep6\"), (.15, .15, .15)]\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 113, "name": "build_plot_signature", "kind": "def", "category": "function", "info": "def build_plot_signature(cls):\n \"\"\"\n Decorator function for giving Plot a useful signature.\n\n Currently this mostly saves us some duplicated typing, but we would\n like eventually to have a way of registering new semantic properties,\n at which point dynamic signature generation would become more important.\n\n \"\"\"\n sig = inspect.signature(cls)\n params = [\n inspect.Parameter(\"args\", inspect.Parameter.VAR_POSITIONAL),\n inspect.Parameter(\"data\", inspect.Parameter.KEYWORD_ONLY, default=None)\n ]\n params.extend([\n inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)\n for name in PROPERTIES\n ])\n new_sig = sig.replace(parameters=params)\n cls.__signature__ = new_sig\n\n known_properties = textwrap.fill(\n \", \".join([f\"|{p}|\" for p in PROPERTIES]),\n width=78, subsequent_indent=\" \" * 8,\n )\n\n if cls.__doc__ is not None: # support python -OO mode\n cls.__doc__ = cls.__doc__.format(known_properties=known_properties)\n\n return cls\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 149, "name": "Plot", "kind": "def", "category": "class", "info": "__init__\t_resolve_positionals\t__add__\t_repr_png_\t_clone\t_theme_with_defaults\t_variables\ton\tadd\tpair\tfacet\tscale\tshare\tlimit\tlabel\tlayout\ttheme\tsave\tshow\tplot\t_plot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 207, "name": "_resolve_positionals", "kind": "ref", "category": "function", "info": " data, variables = self._resolve_positionals(args, data, variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 214, "name": "PlotData", "kind": "ref", "category": "function", "info": " self._data = PlotData(data, variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 233, "name": "_resolve_positionals", "kind": "def", "category": "function", "info": " def _resolve_positionals(\n self,\n args: tuple[DataSource | VariableSpec, ...],\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataSource, dict[str, VariableSpec]]:\n \"\"\"Handle positional arguments, which may contain data / x / y.\"\"\"\n if len(args) > 3:\n err = \"Plot() accepts no more than 3 positional arguments (data, x, y).\"\n raise TypeError(err)\n\n # TODO need some clearer way to differentiate data / vector here\n # (There might be an abstract DataFrame class to use here?)\n if isinstance(args[0], (abc.Mapping, pd.DataFrame)):\n if data is not None:\n raise TypeError(\"`data` given by both name and position.\")\n data, args = args[0], args[1:]\n\n if len(args) == 2:\n x, y = args\n elif len(args) == 1:\n x, y = *args, None\n else:\n x = y = None\n\n for name, var in zip(\"yx\", (y, x)):\n if var is not None:\n if name in variables:\n raise TypeError(f\"`{name}` given by both name and position.\")\n # Keep coordinates at the front of the variables dict\n # Cast type because we know this isn't a DataSource at this point\n variables = {name: cast(VariableSpec, var), **variables}\n\n return data, variables\n\n def __add__(self, 
[The `_resolve_positionals` record's payload embeds the remainder of the Plot class source; what it shows, method by method:

- __add__: raises TypeError ("Sorry, this isn't ggplot! Perhaps try Plot.add?") for Mark/Stat operands, and a generic unsupported-operand TypeError otherwise. (Note a bug in the embedded source: `f"Unsupported operand type(s) for +: 'Plot' and '{other_type}"` is missing the closing quote after `{other_type}`.)
- _repr_png_: compiles via `self.plot()` and delegates to the Plotter's `_repr_png_`.
- _clone: builds a fresh Plot and copies the data reference plus layers, scales, shares, limits, labels, theme, facet/pair specs, figure/subplot/layout specs, and the render target.
- _theme_with_defaults: merges matplotlib's defaults for the style-related rc groups with `axes_style("darkgrid")`, `plotting_context("notebook")`, and a "deep" color cycle, then overlays `self._theme`.
- _variables (property): data-frame columns plus pair/facet variables plus per-layer variables, coerced to str for mypy.
- on(target): validates the target as Axes, Figure, or (matplotlib >= 3.4) SubFigure and stores it on a clone.
- add(mark, *transforms, orient, legend, data, **variables): validates the mark, unpacks at most one leading Stat followed only by Moves (raising TypeError otherwise), and appends the layer spec; "v"/"h" orient values map to "x"/"y". (The source comment "until then well need to" should read "we'll need to".)
- pair(x, y, wrap, cross): builds the pair_spec variables/structure, rejecting scalar x/y and mismatched list lengths when cross=False.
- facet(col, row, order, wrap): builds facet variables and per-dimension order structure; raises RuntimeError when a flat `order` list is given with both col= and row=. (Another embedded bug: a missing comma concatenates the two error-message fragments, yielding "as a listis ambiguous".)
- scale, share, limit, label, layout, theme: each stores its spec on a clone and returns it; theme() enforces exactly one positional dict of rc params.
- save(loc, **kwargs), show(**kwargs), plot(pyplot=False), _plot(pyplot=False): compile under theme_context; _plot extracts data, sets up the figure, scales coordinates first and semantics after the stats run, plots each layer, then builds the legend and finalizes the figure.]
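Taken together, the docstrings embedded above describe the fluent objects interface. A short usage sketch, assuming seaborn >= 0.12 is installed; the data frame is made up for illustration:

```python
import pandas as pd
import seaborn.objects as so

tips = pd.DataFrame({
    "day": ["Thur", "Thur", "Fri", "Fri"],
    "total_bill": [10.0, 14.0, 20.0, 12.0],
    "sex": ["Male", "Female", "Male", "Female"],
    "time": ["Lunch", "Dinner", "Lunch", "Dinner"],
})

(
    so.Plot(tips, x="day", y="total_bill", color="sex")
    .add(so.Bar(), so.Agg(), so.Dodge())  # at most one Stat, first, then Moves
    .facet(col="time")                    # conditional subsets per subplot
    .layout(size=(8, 4))                  # figure size in inches
    .save("tips.png")                     # compile and write to disk
)
```

Each method returns a clone, so intermediate specs can be reused without mutation.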
[- plot.py:276 `_repr_png_` (def, function) and plot.py:278 `_repr_png_` (ref): this record's payload repeats, verbatim, the Plot method source summarized above (from `_repr_png_` through `_plot`); the duplicate is omitted. A generic sketch of the clone-based builder pattern that payload describes follows.]
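This is not seaborn's code; it just isolates the copy-on-write pattern that `_clone` and spec methods like `limit` implement, so shared base specs are never mutated.

```python
# Each public method mutates a clone, never self.
class Spec:
    def __init__(self):
        self._limits = {}

    def _clone(self):
        new = Spec()
        new._limits.update(self._limits)
        return new

    def limit(self, **limits):
        new = self._clone()
        new._limits.update(limits)
        return new

base = Spec()
styled = base.limit(x=(0, 1))
print(base._limits, styled._limits)  # {} {'x': (0, 1)}
```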
"kind": "def", "category": "function", "info": " def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 284, "name": "Plot", "kind": "ref", "category": "function", "info": " new = Plot()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 308, "name": "_theme_with_defaults", "kind": 
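The repeated payloads also contain `theme_context`; the restoring-context-manager pattern it uses is worth isolating. A minimal sketch assuming only matplotlib (the single-letter color-code remapping from the original is omitted):

```python
from contextlib import contextmanager
import matplotlib as mpl

@contextmanager
def theme_context(params):
    # Snapshot only the params we are about to change.
    orig = {k: mpl.rcParams[k] for k in params}
    try:
        mpl.rcParams.update(params)
        yield
    finally:
        mpl.rcParams.update(orig)  # always restore, even on error

with theme_context({"axes.grid": True, "font.size": 12.0}):
    pass  # draw plots under the temporary theme here
```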
"def", "category": "function", "info": " def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. 
    def add(
        self,
        mark: Mark,
        *transforms: Stat | Mark,
        orient: str | None = None,
        legend: bool = True,
        data: DataSource = None,
        **variables: VariableSpec,
    ) -> Plot:
        """
        Specify a layer of the visualization in terms of mark and data transform(s).

        This is the main method for specifying how the data should be visualized.
        It can be called multiple times with different arguments to define
        a plot with multiple layers.

        Parameters
        ----------
        mark : :class:`Mark`
            The visual representation of the data to use in this layer.
        transforms : :class:`Stat` or :class:`Move`
            Objects representing transforms to be applied before plotting the data.
            Currently, at most one :class:`Stat` can be used, and it must be passed
            first. This constraint will be relaxed in the future.
        orient : "x", "y", "v", or "h"
            The orientation of the mark, which also affects how transforms are computed.
            Typically corresponds to the axis that defines groups for aggregation.
            The "v" (vertical) and "h" (horizontal) options are synonyms for "x" / "y",
            but may be more intuitive with some marks. When not provided, an
            orientation will be inferred from characteristics of the data and scales.
        legend : bool
            Option to suppress the mark/mappings for this layer from the legend.
        data : DataFrame or dict
            Data source to override the global source provided in the constructor.
        variables : data vectors or identifiers
            Additional layer-specific variables, including variables that will be
            passed directly to the transforms without scaling.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.add.rst

        """
        if not isinstance(mark, Mark):
            msg = f"mark must be a Mark instance, not {type(mark)!r}."
            raise TypeError(msg)

        # TODO This API for transforms was a late decision, and previously Plot.add
        # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.
        # It will take some work to refactor the internals so that Stat and Move are
        # treated identically, and until then we'll need to "unpack" the transforms
        # here and enforce limitations on the order / types.

        stat: Optional[Stat]
        move: Optional[List[Move]]
        error = False
        if not transforms:
            stat, move = None, None
        elif isinstance(transforms[0], Stat):
            stat = transforms[0]
            move = [m for m in transforms[1:] if isinstance(m, Move)]
            error = len(move) != len(transforms) - 1
        else:
            stat = None
            move = [m for m in transforms if isinstance(m, Move)]
            error = len(move) != len(transforms)

        if error:
            msg = " ".join([
                "Transforms must have at most one Stat type (in the first position),",
                "and all others must be a Move type. Given transform type(s):",
                ", ".join(str(type(t).__name__) for t in transforms) + "."
            ])
            raise TypeError(msg)

        new = self._clone()
        new._layers.append({
            "mark": mark,
            "stat": stat,
            "move": move,
            # TODO it doesn't work to supply scalars to variables, but it should
            "vars": variables,
            "source": data,
            "legend": legend,
            "orient": {"v": "x", "h": "y"}.get(orient, orient),  # type: ignore
        })

        return new
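# Illustrative usage sketch (not part of plot.py): layering with add(). A Stat
# must precede any Move in the transforms, matching the unpacking logic above.
# Assumes seaborn >= 0.12 and access to the "penguins" example dataset.
import seaborn as sns
import seaborn.objects as so

penguins = sns.load_dataset("penguins")
p = (
    so.Plot(penguins, x="species", y="body_mass_g")
    .add(so.Dots())                       # layer 1: raw observations
    .add(so.Dot(pointsize=10), so.Agg())  # layer 2: mark, then stat (mean)
)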
    def pair(
        self,
        x: VariableSpecList = None,
        y: VariableSpecList = None,
        wrap: int | None = None,
        cross: bool = True,
    ) -> Plot:
        """
        Produce subplots by pairing multiple `x` and/or `y` variables.

        Parameters
        ----------
        x, y : sequence(s) of data vectors or identifiers
            Variables that will define the grid of subplots.
        wrap : int
            When using only `x` or `y`, "wrap" subplots across a two-dimensional grid
            with this many columns (when using `x`) or rows (when using `y`).
        cross : bool
            When False, zip the `x` and `y` lists such that the first subplot gets the
            first pair, the second gets the second pair, etc. Otherwise, create a
            two-dimensional grid from the Cartesian product of the lists.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.pair.rst

        """
        # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows
        # This may also be possible by setting `wrap=1`, but is that too unobvious?
        # TODO PairGrid features not currently implemented: diagonals, corner

        pair_spec: PairSpec = {}

        axes = {"x": [] if x is None else x, "y": [] if y is None else y}
        for axis, arg in axes.items():
            if isinstance(arg, (str, int)):
                err = f"You must pass a sequence of variable keys to `{axis}`"
                raise TypeError(err)

        pair_spec["variables"] = {}
        pair_spec["structure"] = {}

        for axis in "xy":
            keys = []
            for i, col in enumerate(axes[axis]):
                key = f"{axis}{i}"
                keys.append(key)
                pair_spec["variables"][key] = col

            if keys:
                pair_spec["structure"][axis] = keys

        if not cross and len(axes["x"]) != len(axes["y"]):
            err = "Lengths of the `x` and `y` lists must match with cross=False"
            raise ValueError(err)

        pair_spec["cross"] = cross
        pair_spec["wrap"] = wrap

        new = self._clone()
        new._pair_spec.update(pair_spec)
        return new

    def facet(
        self,
        col: VariableSpec = None,
        row: VariableSpec = None,
        order: OrderSpec | dict[str, OrderSpec] = None,
        wrap: int | None = None,
    ) -> Plot:
        """
        Produce subplots with conditional subsets of the data.

        Parameters
        ----------
        col, row : data vectors or identifiers
            Variables used to define subsets along the columns and/or rows of the grid.
            Can be references to the global data source passed in the constructor.
        order : list of strings, or dict with dimensional keys
            Define the order of the faceting variables.
        wrap : int
            When using only `col` or `row`, wrap subplots across a two-dimensional
            grid with this many subplots on the faceting dimension.

        Examples
        --------
        .. include:: ../docstrings/objects.Plot.facet.rst

        """
        variables: dict[str, VariableSpec] = {}
        if col is not None:
            variables["col"] = col
        if row is not None:
            variables["row"] = row

        structure = {}
        if isinstance(order, dict):
            for dim in ["col", "row"]:
                dim_order = order.get(dim)
                if dim_order is not None:
                    structure[dim] = list(dim_order)
        elif order is not None:
            if col is not None and row is not None:
                err = " ".join([
                    "When faceting on both col= and row=, passing `order` as a list",
                    "is ambiguous. Use a dict with 'col' and/or 'row' keys instead.",
                ])
                raise RuntimeError(err)
            elif col is not None:
                structure["col"] = list(order)
            elif row is not None:
                structure["row"] = list(order)

        spec: FacetSpec = {
            "variables": variables,
            "structure": structure,
            "wrap": wrap,
        }

        new = self._clone()
        new._facet_spec.update(spec)

        return new

    # TODO def twin()?
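# Illustrative usage sketch (not part of plot.py): pair() builds a subplot grid
# from variable lists, while facet() subsets the data instead. Assumes access
# to the "penguins" example dataset.
import seaborn as sns
import seaborn.objects as so

penguins = sns.load_dataset("penguins")
p1 = (
    so.Plot(penguins, y="body_mass_g")
    .pair(x=["bill_length_mm", "bill_depth_mm", "flipper_length_mm"], wrap=2)
    .add(so.Dots())
)
p2 = (
    so.Plot(penguins, x="flipper_length_mm", y="body_mass_g")
    .facet(col="species", order=["Adelie", "Gentoo", "Chinstrap"])
    .add(so.Dots())
)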
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 321, "name": "axes_style", "kind": "ref", "category": "function", "info": " **axes_style(\"darkgrid\"),\n"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 322, "name": "plotting_context", "kind": "ref", "category": "function", "info": " **plotting_context(\"notebook\"),\n"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 323, "name": "color_palette", "kind": "ref", "category": "function", "info": " \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 329, "name": "_variables", "kind": "def", "category": "function", "info": " def _variables(self) -> list[str]:\n"}
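# Illustrative sketch (not part of plot.py): the records above point at the
# pieces that _theme_with_defaults() combines, i.e. axes_style("darkgrid"),
# plotting_context("notebook"), and the "deep" palette on top of the rcParams
# groups. The same dict can be assembled directly:
from cycler import cycler
from seaborn import axes_style, plotting_context, color_palette

defaults = {
    **axes_style("darkgrid"),
    **plotting_context("notebook"),
    "axes.prop_cycle": cycler("color", color_palette("deep")),
}
print(defaults["axes.facecolor"])  # the darkgrid background color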
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 343, "name": "on", "kind": "def", "category": "function", "info": " def on(self, target: Axes | SubFigure | Figure) -> Plot:\n"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 384, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 464, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}
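# Illustrative end-to-end sketch (not part of plot.py): every method returns a
# new Plot via _clone(), so a spec builds up immutably and compiles once at the
# end. Assumes the "penguins" dataset; the output path is hypothetical.
import seaborn as sns
import seaborn.objects as so

penguins = sns.load_dataset("penguins")
(
    so.Plot(penguins, x="flipper_length_mm", y="body_mass_g", color="species")
    .add(so.Dots())
    .add(so.Line(), so.PolyFit(order=1))  # linear fit per color group
    .facet(col="sex")
    .scale(color="muted")
    .label(x="Flipper length (mm)", y="Body mass (g)")
    .layout(size=(8, 4))
    .save("penguins.png")
)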
"category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 478, "name": "pair", "kind": "def", "category": "function", "info": " def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. 
Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 537, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 541, "name": "facet", "kind": "def", "category": "function", "info": " def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 597, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 604, "name": "scale", "kind": "def", 
"category": "function", "info": " def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. 
The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 627, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 631, "name": "share", "kind": "def", "category": "function", "info": " def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 646, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 650, "name": "limit", "kind": "def", 
"category": "function", "info": " def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 667, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 671, "name": "label", "kind": "def", 
"category": "function", "info": " def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 693, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 699, "name": "layout", "kind": "def", 
"category": "function", "info": " def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 733, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 744, "name": "theme", "kind": "def", "category": "function", "info": " def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 764, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 777, "name": "save", "kind": "def", 
"category": "function", "info": " def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 791, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 791, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 792, "name": "_plot", "kind": "ref", 
"category": "function", "info": " self._plot().save(loc, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 792, "name": "save", "kind": "ref", "category": "function", "info": " self._plot().save(loc, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 819, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 819, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 820, "name": "_plot", "kind": "ref", "category": "function", "info": " return self._plot(pyplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 822, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 827, "name": "Plotter", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 827, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 830, "name": "_extract_data", "kind": "ref", "category": "function", "info": " common, layers = plotter._extract_data(self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 831, 
"name": "_setup_figure", "kind": "ref", "category": "function", "info": " plotter._setup_figure(self, common, layers)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 834, "name": "match", "kind": "ref", "category": "function", "info": " coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 835, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, common, layers, coord_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 838, "name": "_compute_stats", "kind": "ref", "category": "function", "info": " plotter._compute_stats(self, layers)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 841, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, common, layers)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 850, "name": "_plot_layer", "kind": "ref", "category": "function", "info": " plotter._plot_layer(self, layer)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 853, "name": "_make_legend", "kind": "ref", "category": "function", "info": " plotter._make_legend(self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 854, "name": "_finalize_figure", "kind": "ref", "category": "function", "info": " plotter._finalize_figure(self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 862, "name": "Plotter", "kind": "def", "category": "class", "info": "__init__\tsave\tshow\t_repr_png_\t_extract_data\t_resolve_label\t_setup_figure\t_compute_stats\t_get_scale\t_get_subplot_data\t_setup_scales\t_plot_layer\t_unscale_coords\t_generate_pairings\t_get_subplot_index\t_filter_subplot_data\t_setup_split_generator\t_update_legend_contents\t_make_legend\t_finalize_figure"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 883, "name": "save", "kind": "def", "category": "function", "info": " def save(self, loc, **kwargs) -> Plotter: # TODO type args\n kwargs.setdefault(\"dpi\", 96)\n try:\n loc = os.path.expanduser(loc)\n except TypeError:\n # loc may be a buffer in which case that would not work\n pass\n self._figure.savefig(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Display the plot by hooking into pyplot.\n\n This method calls :func:`matplotlib.pyplot.show` with any keyword parameters.\n\n \"\"\"\n # TODO if we did not create the Plotter with pyplot, is it possible to do this?\n # If not we should clearly raise.\n import matplotlib.pyplot as plt\n with theme_context(self._theme):\n plt.show(**kwargs)\n\n # TODO API for accessing the underlying matplotlib objects\n # TODO what else is useful in the public API for this class?\n\n def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n # TODO better to 
do this through a Jupyter hook? e.g.\n # ipy = IPython.core.formatters.get_ipython()\n # fmt = ipy.display_formatter.formatters[\"text/html\"]\n # fmt.for_type(Plot, ...)\n # Would like to have a svg option too, not sure how to make that flexible\n\n # TODO use matplotlib backend directly instead of going through savefig?\n\n # TODO perhaps have self.show() flip a switch to disable this, so that\n # user does not end up with two versions of the figure in the output\n\n # TODO use bbox_inches=\"tight\" like the inline backend?\n # pro: better results, con: (sometimes) confusing results\n # Better solution would be to default (with option to change)\n # to using constrained/tight layout.\n\n # TODO need to decide what the right default behavior here is:\n # - Use dpi=72 to match default InlineBackend figure size?\n # - Accept a generic \"scaling\" somewhere and scale DPI from that,\n # either with 1x -> 72 or 1x -> 96 and the default scaling be .75?\n # - Listen to rcParams? InlineBackend behavior makes that so complicated :(\n # - Do we ever want to *not* use retina mode at this point?\n\n from PIL import Image\n\n dpi = 96\n buffer = io.BytesIO()\n\n with theme_context(self._theme):\n self._figure.savefig(buffer, dpi=dpi * 2, format=\"png\", bbox_inches=\"tight\")\n data = buffer.getvalue()\n\n scaling = .85 / 2\n w, h = Image.open(buffer).size\n metadata = {\"width\": w * scaling, \"height\": h * scaling}\n return data, metadata\n\n def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? 
Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n 
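# Resolution order for a variable's scale, as implemented below: an explicit
# entry passed to Plot.scale() wins (either a Scale object or None, meaning
# an identity scale); any other argument (a transform or palette name, a
# tuple range, a dict, or a list) is interpreted by the property via
# infer_scale; with no entry at all, the property picks a default scale
# from the seed values.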
if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple times, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n 
pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
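# Merging note: matplotlib renders a tuple of artists as a single legend
# entry (its default handler map includes HandlerTuple), so contributions
# that share a (name, id) key across layers are stacked into tuples below
# rather than producing duplicate legend rows.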
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 886, "name": "expanduser", "kind": "ref", "category": "function", "info": " loc = os.path.expanduser(loc)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 903, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 909, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]]:\n\n return self.plot()._repr_png_()\n\n # TODO _repr_svg_?\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n 
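# The spec dicts are shallow-copied via dict.update here, while the
# underlying data source object itself is shared between clones.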
new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n style_groups = [\n \"axes\", \"figure\", \"font\", \"grid\", \"hatch\", \"legend\", \"lines\",\n \"mathtext\", \"markers\", \"patch\", \"savefig\", \"scatter\",\n \"xaxis\", \"xtick\", \"yaxis\", \"ytick\",\n ]\n base = {\n k: mpl.rcParamsDefault[k] for k in mpl.rcParams\n if any(k.startswith(p) for p in style_groups)\n }\n theme = {\n **base,\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. 
\"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the default appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = args[0]\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 939, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 941, "name": 
"getvalue", "kind": "ref", "category": "function", "info": " data = buffer.getvalue()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 948, "name": "_extract_data", "kind": "def", "category": "function", "info": " def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) 
so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, 
\"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n 
pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 964, "name": "_resolve_label", "kind": "def", "category": "function", "info": " def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, 
p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n 
else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. 
Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n 
pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 970, "name": "manual_label", "kind": "ref", "category": "function", "info": " label = manual_label(auto_label)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 979, "name": "_setup_figure", "kind": "def", "category": "function", "info": " def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in 
\"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if 
pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. 
Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n 
pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
    def _setup_split_generator(
        self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],
    ) -> Callable[[], Generator]:

        grouping_keys = []
        grouping_vars = [
            v for v in grouping_vars if v in df and v not in ["col", "row"]
        ]
        for var in grouping_vars:
            order = getattr(self._scales[var], "order", None)
            if order is None:
                order = categorical_order(df[var])
            grouping_keys.append(order)

        def split_generator(keep_na=False) -> Generator:

            for view in subplots:

                axes_df = self._filter_subplot_data(df, view)

                with pd.option_context("mode.use_inf_as_na", True):
                    if keep_na:
                        # The simpler thing to do would be x.dropna().reindex(x.index).
                        # But that doesn't work with the way that the subset iteration
                        # is written below, which assumes data for grouping vars.
                        # Matplotlib (usually?) masks nan data, so this should "work".
                        # Downstream code can also drop these rows, at some speed cost.
                        present = axes_df.notna().all(axis=1)
                        nulled = {}
                        for axis in "xy":
                            if axis in axes_df:
                                nulled[axis] = axes_df[axis].where(present)
                        axes_df = axes_df.assign(**nulled)
                    else:
                        axes_df = axes_df.dropna()

                subplot_keys = {}
                for dim in ["col", "row"]:
                    if view[dim] is not None:
                        subplot_keys[dim] = view[dim]

                if not grouping_vars or not any(grouping_keys):
                    if not axes_df.empty:
                        yield subplot_keys, axes_df.copy(), view["ax"]
                    continue

                grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)

                for key in itertools.product(*grouping_keys):

                    # Pandas fails with singleton tuple inputs
                    pd_key = key[0] if len(key) == 1 else key

                    try:
                        df_subset = grouped_df.get_group(pd_key)
                    except KeyError:
                        # TODO (from initial work on the categorical plots refactor)
                        # We are adding this to allow backwards compatibility
                        # with the empty artists that old categorical plots would
                        # add (before 0.12), which we may decide to break, in which
                        # case this option could be removed
                        df_subset = axes_df.loc[[]]

                    if df_subset.empty:
                        continue

                    sub_vars = dict(zip(grouping_vars, key))
                    sub_vars.update(subplot_keys)

                    # TODO need a copy(deep=...) policy (here, above, anywhere else?)
                    yield sub_vars, df_subset.copy(), view["ax"]

        return split_generator
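The pd_key unwrapping above reflects a pandas quirk: when grouping by a single column, get_group wants a scalar key rather than a length-1 tuple (exact behavior varies across pandas versions). A toy illustration:

import pandas as pd

# Why split_generator unwraps singleton keys before calling get_group.
# Data and key are invented for the demo.
df = pd.DataFrame({"hue": ["a", "a", "b"], "y": [1, 2, 3]})
grouped = df.groupby(["hue"], sort=False, as_index=False)

key = ("a",)
pd_key = key[0] if len(key) == 1 else key
print(grouped.get_group(pd_key))  # the two rows with hue == "a"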
    def _update_legend_contents(
        self,
        p: Plot,
        mark: Mark,
        data: PlotData,
        scales: dict[str, Scale],
    ) -> None:
        """Add legend artists / labels for one layer in the plot."""
        if data.frame.empty and data.frames:
            legend_vars: list[str] = []
            for frame in data.frames.values():
                frame_vars = frame.columns.intersection(list(scales))
                legend_vars.extend(v for v in frame_vars if v not in legend_vars)
        else:
            legend_vars = list(data.frame.columns.intersection(list(scales)))

        # First pass: identify the values that will be shown for each variable
        schema: list[tuple[
            tuple[str, str | int], list[str], tuple[list, list[str]]
        ]] = []
        for var in legend_vars:
            var_legend = scales[var]._legend
            if var_legend is not None:
                values, labels = var_legend
                for (_, part_id), part_vars, _ in schema:
                    if data.ids[var] == part_id:
                        # Allow multiple plot semantics to represent the same data variable
                        part_vars.append(var)
                        break
                else:
                    title = self._resolve_label(p, var, data.names[var])
                    entry = (title, data.ids[var]), [var], (values, labels)
                    schema.append(entry)

        # Second pass: generate an artist corresponding to each value
        contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []
        for key, variables, (values, labels) in schema:
            artists = []
            for val in values:
                artist = mark._legend_artist(variables, val, scales)
                if artist is not None:
                    artists.append(artist)
            if artists:
                contents.append((key, artists, labels))

        self._legend_contents.extend(contents)

    def _make_legend(self, p: Plot) -> None:
        """Create the legend artist(s) and add onto the figure."""
        # Combine artists representing the same information across layers.
        # The input list has an entry for each distinct variable in each layer;
        # the output dict has an entry for each distinct variable.
        merged_contents: dict[
            tuple[str, str | int], tuple[list[Artist], list[str]],
        ] = {}
        for key, new_artists, labels in self._legend_contents:
            # Key is (name, id); we need the id to resolve variable uniqueness,
            # but will need the name in the next step to title the legend
            if key in merged_contents:
                # Copy so inplace updates don't propagate back to legend_contents
                existing_artists = merged_contents[key][0]
                for i, artist in enumerate(existing_artists):
                    # Matplotlib accepts a tuple of artists and will overlay them
                    if isinstance(artist, tuple):
                        artist += new_artists[i],
                    else:
                        existing_artists[i] = artist, new_artists[i]
            else:
                merged_contents[key] = new_artists.copy(), labels

        # TODO explain
        loc = "center right" if self._pyplot else "center left"

        base_legend = None
        for (name, _), (handles, labels) in merged_contents.items():

            legend = mpl.legend.Legend(
                self._figure,
                handles,
                labels,
                title=name,
                loc=loc,
                bbox_to_anchor=(.98, .55),
            )

            if base_legend:
                # Matplotlib has no public API for this so it is a bit of a hack.
                # Ideally we'd define our own legend class with more flexibility,
                # but that is a lot of work!
                base_legend_box = base_legend.get_children()[0]
                this_legend_box = legend.get_children()[0]
                base_legend_box.get_children().extend(this_legend_box.get_children())
            else:
                base_legend = legend
                self._figure.legends.append(legend)

    def _finalize_figure(self, p: Plot) -> None:

        for sub in self._subplots:
            ax = sub["ax"]
            for axis in "xy":
                axis_key = sub[axis]
                axis_obj = getattr(ax, f"{axis}axis")

                # Axis limits
                if axis_key in p._limits:
                    convert_units = getattr(ax, f"{axis}axis").convert_units
                    a, b = p._limits[axis_key]
                    lo = a if a is None else convert_units(a)
                    hi = b if b is None else convert_units(b)
                    if isinstance(a, str):
                        lo = cast(float, lo) - 0.5
                    if isinstance(b, str):
                        hi = cast(float, hi) + 0.5
                    ax.set(**{f"{axis}lim": (lo, hi)})

                # Nominal scale special-casing
                if isinstance(self._scales.get(axis_key), Nominal):
                    axis_obj.grid(False, which="both")
                    if axis_key not in p._limits:
                        nticks = len(axis_obj.get_major_ticks())
                        lo, hi = -.5, nticks - .5
                        if axis == "y":
                            lo, hi = hi, lo
                        set_lim = getattr(ax, f"set_{axis}lim")
                        set_lim(lo, hi, auto=None)

        engine_default = None if p._target is not None else "tight"
        layout_engine = p._layout_spec.get("engine", engine_default)
        set_layout_engine(self._figure, layout_engine)
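The tuple-of-artists trick that _make_legend above relies on is supported by matplotlib's default legend handler map, which renders a tuple of handles as a single overlaid entry. A minimal sketch with placeholder artists:

import matplotlib.pyplot as plt
from matplotlib.lines import Line2D

# A tuple of artists becomes one combined legend entry; the artists here
# are invented placeholders, not seaborn marks.
fig, ax = plt.subplots()
point = Line2D([], [], marker="o", linestyle="", color="C0")
line = Line2D([], [], linestyle="-", color="C0")
ax.legend(handles=[(point, line)], labels=["combined entry"])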
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 993, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(common.frame[dim])"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 996, "name": "Subplots", "kind": "ref", "category": "function", "info": " self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 999, "name": "init_figure", "kind": "ref", "category": "function", "info": " self._figure = subplots.init_figure("}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1020, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " label = self._resolve_label(p, axis_key, auto_label)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1056, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1058, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " key = self._resolve_label(p, dim, common.names.get(dim))"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1076, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " title = self._resolve_label(p, \"title\", None)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1079, "name": "_compute_stats", "kind": "def", "category": "function", "info": " def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None: ..."}

    def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:

        grouping_vars = [v for v in PROPERTIES if v not in "xy"]
        grouping_vars += ["col", "row", "group"]

        pair_vars = spec._pair_spec.get("structure", {})

        for layer in layers:

            data = layer["data"]
            mark = layer["mark"]
            stat = layer["stat"]

            if stat is None:
                continue

            iter_axes = itertools.product(*[
                pair_vars.get(axis, [axis]) for axis in "xy"
            ])

            old = data.frame

            if pair_vars:
                data.frames = {}
                data.frame = data.frame.iloc[:0]  # TODO to simplify typing

            for coord_vars in iter_axes:

                pairings = "xy", coord_vars

                df = old.copy()
                scales = self._scales.copy()

                for axis, var in zip(*pairings):
                    if axis != var:
                        df = df.rename(columns={var: axis})
                        drop_cols = [x for x in df if re.match(rf"{axis}\d+", str(x))]
                        df = df.drop(drop_cols, axis=1)
                        scales[axis] = scales[var]

                orient = layer["orient"] or mark._infer_orient(scales)

                if stat.group_by_orient:
                    grouper = [orient, *grouping_vars]
                else:
                    grouper = grouping_vars
                groupby = GroupBy(grouper)
                res = stat(df, groupby, orient, scales)

                if pair_vars:
                    data.frames[coord_vars] = res
                else:
                    data.frame = res

    def _get_scale(
        self, spec: Plot, var: str, prop: Property, values: Series
    ) -> Scale:

        if var in spec._scales:
            arg = spec._scales[var]
            if arg is None or isinstance(arg, Scale):
                scale = arg
            else:
                scale = prop.infer_scale(arg, values)
        else:
            scale = prop.default_scale(values)

        return scale

    def _get_subplot_data(self, df, var, view, share_state):

        if share_state in [True, "all"]:
            # The all-shared case is easiest: every subplot sees all the data
            seed_values = df[var]
        else:
            # Otherwise, we need to set up separate scales for different subplots
            if share_state in [False, "none"]:
                # Fully independent axes are also easy: use each subplot's data
                idx = self._get_subplot_index(df, view)
            elif share_state in df:
                # Sharing within row/col is more complicated
                use_rows = df[share_state] == view[share_state]
                idx = df.index[use_rows]
            else:
                # This configuration doesn't make much sense, but it's fine
                idx = df.index

            seed_values = df.loc[idx, var]

        return seed_values

    def _setup_scales(
        self, p: Plot,
        common: PlotData,
        layers: list[Layer],
        variables: list[str] | None = None,
    ) -> None:

        if variables is None:
            # Add variables that have data but not a scale, which happens
            # because this method can be called multiple times, to handle
            # variables added during the Stat transform.
            variables = []
            for layer in layers:
                variables.extend(layer["data"].frame.columns)
                for df in layer["data"].frames.values():
                    variables.extend(str(v) for v in df if v not in variables)
            variables = [v for v in variables if v not in self._scales]

        for var in variables:

            # Determine whether this is a coordinate variable
            # (i.e., x/y, paired x/y, or a derivative such as xmax)
            m = re.match(r"^(?P<coord>(?P<axis>x|y)\d*).*", var)
            if m is None:
                coord = axis = None
            else:
                coord = m["coord"]
                axis = m["axis"]

            # Get keys that handle things like x0, xmax, properly where relevant
            prop_key = var if axis is None else axis
            scale_key = var if coord is None else coord

            if prop_key not in PROPERTIES:
                continue

            # Concatenate layers, using only the relevant coordinate and faceting vars.
            # This is unnecessarily wasteful, as layer data will often be redundant,
            # but figuring out the minimal amount we need is more complicated.
            cols = [var, "col", "row"]
            parts = [common.frame.filter(cols)]
            for layer in layers:
                parts.append(layer["data"].frame.filter(cols))
                for df in layer["data"].frames.values():
                    parts.append(df.filter(cols))
            var_df = pd.concat(parts, ignore_index=True)

            prop = PROPERTIES[prop_key]
            scale = self._get_scale(p, scale_key, prop, var_df[var])

            if scale_key not in p._variables:
                # TODO this implies that the variable was added by the stat.
                # It allows downstream orientation inference to work properly,
                # but it feels rather hacky, so ideally revisit.
                scale._priority = 0  # type: ignore

            if axis is None:
                # We could think about having a broader concept of (un)shared properties.
                # In general, not something you want to do (different scales in facets),
                # but it could make sense e.g. with paired plots. Build later.
                share_state = None
                subplots = []
            else:
                share_state = self._subplots.subplot_spec[f"share{axis}"]
                subplots = [view for view in self._subplots if view[axis] == coord]

            # Shared categorical axes are broken on matplotlib<3.4.0.
            # https://github.com/matplotlib/matplotlib/pull/18308
            # This only affects us when sharing *paired* axes. This is a novel/niche
            # behavior, so we will raise rather than hack together a workaround.
            if axis is not None and Version(mpl.__version__) < Version("3.4.0"):
                paired_axis = axis in p._pair_spec.get("structure", {})
                cat_scale = isinstance(scale, Nominal)
                ok_dim = {"x": "col", "y": "row"}[axis]
                shared_axes = share_state not in [False, "none", ok_dim]
                if paired_axis and cat_scale and shared_axes:
                    err = "Sharing paired categorical axes requires matplotlib>=3.4.0"
                    raise RuntimeError(err)

            if scale is None:
                self._scales[var] = Scale._identity()
            else:
                try:
                    self._scales[var] = scale._setup(var_df[var], prop)
                except Exception as err:
                    raise PlotSpecError._during("Scale setup", var) from err

            if axis is None or (var != coord and coord in p._variables):
                # Everything below here applies only to coordinate variables
                continue

            # Set up an empty series to receive the transformed values.
            # We need this to handle piecemeal transforms of categories -> floats.
            transformed_data = []
            for layer in layers:
                index = layer["data"].frame.index
                empty_series = pd.Series(dtype=float, index=index, name=var)
                transformed_data.append(empty_series)

            for view in subplots:

                axis_obj = getattr(view["ax"], f"{axis}axis")
                seed_values = self._get_subplot_data(var_df, var, view, share_state)
                view_scale = scale._setup(seed_values, prop, axis=axis_obj)
                set_scale_obj(view["ax"], axis, view_scale._matplotlib_scale)

                for layer, new_series in zip(layers, transformed_data):
                    layer_df = layer["data"].frame
                    if var not in layer_df:
                        continue

                    idx = self._get_subplot_index(layer_df, view)
                    try:
                        new_series.loc[idx] = view_scale(layer_df.loc[idx, var])
                    except Exception as err:
                        spec_error = PlotSpecError._during("Scaling operation", var)
                        raise spec_error from err

            # Now the transformed data series are complete; update the layer data
            for layer, new_series in zip(layers, transformed_data):
                layer_df = layer["data"].frame
                if var in layer_df:
                    layer_df[var] = new_series
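The named-group pattern in _setup_scales (repaired above, since the angle brackets of the group names were lost in extraction) splits a variable name into its scale key and axis. Applied to a few example names:

import re

# The coord/axis split used to route "x", "y1", "xmax", etc. to scales.
pattern = re.compile(r"^(?P<coord>(?P<axis>x|y)\d*).*")
for var in ["x", "y1", "xmax", "color"]:
    m = pattern.match(var)
    print(var, None if m is None else (m["coord"], m["axis"]))
# x ('x', 'x') / y1 ('y1', 'y') / xmax ('x', 'x') / color None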
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1115, "name": "match", "kind": "ref", "category": "function", "info": " drop_cols = [x for x in df if re.match(rf\"{axis}\d+\", str(x))]"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1119, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1125, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(grouper)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1133, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(self, spec: Plot, var: str, prop: Property, values: Series) -> Scale: ..."}
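The rf"{axis}\d+" pattern recorded above (line 1115) drives the paired-variable cleanup in _compute_stats and _generate_pairings. Reproduced in isolation, with a toy frame and an assumed pairing of "y2" onto the y axis:

import re
import pandas as pd

# Rename the paired variable to its axis, then drop leftover numbered
# columns; all names here are invented for the demo.
df = pd.DataFrame(columns=["x", "y1", "y2", "y3", "color"])
axis, var = "y", "y2"
df = df.rename(columns={var: axis})
drop_cols = [c for c in df if re.match(rf"{axis}\d+", str(c))]
print(df.drop(columns=drop_cols).columns.tolist())  # ['x', 'y', 'color']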
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1142, "name": "infer_scale", "kind": "ref", "category": "function", "info": " scale = prop.infer_scale(arg, values)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1144, "name": "default_scale", "kind": "ref", "category": "function", "info": " scale = prop.default_scale(values)"}
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1148, "name": "_get_subplot_data", "kind": "def", "category": "function", "info": " def _get_subplot_data(self, df, var, view, share_state): ..."}
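A rough sketch of the share states that _get_subplot_data (line 1148 above) distinguishes: "all" seeds a subplot's scale with every row, while sharing by "col" restricts to the subplot's facet level. Frame and view are invented:

import pandas as pd

# Seed-value selection under two share states; toy inputs only.
df = pd.DataFrame({"x": [1, 2, 3, 4], "col": ["a", "a", "b", "b"]})
view = {"col": "a"}

seed_all = df["x"]                                # share_state == "all"
seed_col = df.loc[df["col"] == view["col"], "x"]  # share_state == "col"
print(seed_all.tolist(), seed_col.tolist())       # [1, 2, 3, 4] [1, 2]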
configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, set update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n 
pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backward compatibility\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass: generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them;\n # store the extended tuple back (augmented assignment to the\n # loop variable would only rebind the local name)\n if isinstance(artist, tuple):\n existing_artists[i] = artist + (new_artists[i],)\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1157, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": " idx = self._get_subplot_index(df, view)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1170, "name": "_setup_scales", "kind": "def", "category": "function", "info": " def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple times, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n 
axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, set 
update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = new_series\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1192, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1218, "name": "_get_scale", "kind": "ref", "category": "function", "info": " scale = self._get_scale(p, scale_key, prop, var_df[var])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1240, "name": "Version", "kind": "ref", "category": "function", "info": " if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1240, "name": "Version", "kind": "ref", "category": "function", "info": " if axis is not None and Version(mpl.__version__) < Version(\"3.4.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1250, "name": "_identity", "kind": "ref", "category": "function", "info": " self._scales[var] = Scale._identity()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1253, "name": "_setup", "kind": "ref", "category": "function", "info": " self._scales[var] = scale._setup(var_df[var], prop)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1255, "name": "_during", "kind": "ref", "category": "function", "info": " raise PlotSpecError._during(\"Scale setup\", var) from err\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1272, "name": "_get_subplot_data", "kind": "ref", "category": "function", "info": " seed_values = self._get_subplot_data(var_df, var, view, share_state)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1273, "name": "_setup", "kind": "ref", "category": "function", "info": " view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1274, "name": "set_scale_obj", "kind": "ref", "category": "function", "info": " set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1281, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": " idx = self._get_subplot_index(layer_df, view)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1283, "name": "view_scale", "kind": "ref", "category": "function", "info": " new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1285, "name": "_during", "kind": "ref", "category": "function", "info": " spec_error = PlotSpecError._during(\"Scaling operation\", var)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1294, "name": "_plot_layer", "kind": "def", "category": "function", "info": " def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1305, "name": "_generate_pairings", "kind": "ref", "category": "function", "info": " for subplots, df, scales in self._generate_pairings(data, pair_variables):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1307, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1309, "name": "get_order", "kind": "def", "category": "function", "info": " def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n"},
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1320, "name": "_get_subplot_data", "kind": "ref", "category": "function", "info": " view_idx = self._get_subplot_data(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1325, "name": "_resolve", "kind": "ref", "category": "function", "info": " view_width = mark._resolve(view_df, \"width\", None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1330, "name": "_spacing", "kind": "ref", "category": "function", "info": " spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1339, "name": "_resolve", "kind": "ref", "category": "function", "info": " baseline = mark._resolve(df, \"baseline\", None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1356, "name": "get_order", "kind": "ref", "category": "function", "info": " order = {var: get_order(var) for var in move_groupers}\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1357, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1358, "name": "move_step", "kind": "ref", "category": "function", "info": " df = move_step(df, groupby, orient, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1360, "name": "_unscale_coords", "kind": "ref", "category": "function", "info": " df = self._unscale_coords(subplots, df, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1363, "name": "_setup_split_generator", "kind": "ref", "category": "function", "info": " split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1365, "name": "_plot", "kind": "ref", "category": "function", "info": 
" mark._plot(split_generator, scales, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1372, "name": "_update_legend_contents", "kind": "ref", "category": "function", "info": " self._update_legend_contents(p, mark, data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1374, "name": "_unscale_coords", "kind": "def", "category": "function", "info": " def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n drop_cols = [*coord_cols, \"width\"] if \"width\" in df else coord_cols\n out_df = (\n df\n .drop(drop_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n if var == orient and \"width\" in view_df:\n width = view_df[\"width\"]\n out_df.loc[values.index, \"width\"] = (\n transform(values + width / 2) - transform(values - width / 2)\n )\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1378, "name": "match", "kind": "ref", "category": "function", "info": " coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1388, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " view_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1394, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = axis.get_transform().inverted().transform\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1406, "name": "_generate_pairings", "kind": "def", "category": "function", "info": " def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1443, "name": "match", "kind": "ref", "category": "function", "info": " cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1448, "name": "_get_subplot_index", "kind": "def", "category": "function", "info": " def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1459, "name": "_filter_subplot_data", "kind": "def", "category": "function", "info": " def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1470, "name": "_setup_split_generator", "kind": "def", "category": "function", "info": " def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1481, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(df[var])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1484, "name": "split_generator", "kind": "def", "category": "function", "info": " def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1488, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " axes_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1524, "name": "get_group", "kind": "ref", "category": "function", "info": " df_subset = grouped_df.get_group(pd_key)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1544, "name": "_update_legend_contents", "kind": "def", "category": "function", "info": " def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, 
str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1575, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " title = self._resolve_label(p, var, data.names[var])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1584, "name": "_legend_artist", "kind": "ref", "category": "function", "info": " artist = mark._legend_artist(variables, val, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1592, "name": "_make_legend", "kind": "def", "category": "function", "info": " def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n 
merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1621, "name": "Legend", "kind": "ref", "category": "function", "info": " legend = mpl.legend.Legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1641, "name": "_finalize_figure", "kind": "def", "category": "function", "info": " def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n # Nominal scale 
special-casing\n if isinstance(self._scales.get(axis_key), Nominal):\n axis_obj.grid(False, which=\"both\")\n if axis_key not in p._limits:\n nticks = len(axis_obj.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if axis == \"y\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{axis}lim\")\n set_lim(lo, hi, auto=None)\n\n engine_default = None if p._target is not None else \"tight\"\n layout_engine = p._layout_spec.get(\"engine\", engine_default)\n set_layout_engine(self._figure, layout_engine)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1653, "name": "convert_units", "kind": "ref", "category": "function", "info": " lo = a if a is None else convert_units(a)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1654, "name": "convert_units", "kind": "ref", "category": "function", "info": " hi = b if b is None else convert_units(b)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1665, "name": "get_major_ticks", "kind": "ref", "category": "function", "info": " nticks = len(axis_obj.get_major_ticks())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1670, "name": "set_lim", "kind": "ref", "category": "function", "info": " set_lim(lo, hi, auto=None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 46, "name": "Property", "kind": "def", "category": "class", "info": "__init__\tdefault_scale\tinfer_scale\tget_mapping\tstandardize\t_check_dict_entries\t_check_list_length"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 61, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Scale:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n # TODO allow variable_type to be \"boolean\" if that's a scale?\n # TODO how will this handle data with units that can be treated as numeric\n # if passed through a registered matplotlib converter?\n var_type = variable_type(data, boolean_type=\"numeric\")\n if var_type == \"numeric\":\n return Continuous()\n elif var_type == \"datetime\":\n return Temporal()\n # TODO others\n # time-based (TimeStamp, TimeDelta, Period)\n # boolean scale?\n else:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? 
That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 66, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 68, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 70, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 75, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 77, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. 
for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 88, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(trans=arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 97, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n 
f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 105, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 109, "name": "_check_dict_entries", "kind": "def", "category": "function", "info": " def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", 
"rel_fname": "seaborn/_core/properties.py", "line": 117, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 147, "name": "Coordinate", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 158, "name": "IntervalProperty", "kind": "def", "category": "class", "info": "default_range\t_forward\t_inverse\tinfer_scale\tget_mapping\t_get_categorical_mapping"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 166, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n 
self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 170, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n 
vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 174, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 178, 
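IntervalProperty.get_mapping above maps scaled data (assumed to lie in [0, 1]) linearly into the property range inside a transformed space, then inverts back to native units. A standalone sketch under those assumptions (make_interval_mapping is a hypothetical name); the square/sqrt pair models PointSize, where equal data steps should produce equal steps in point area rather than diameter:

    import numpy as np

    def make_interval_mapping(vmin, vmax, forward=np.square, inverse=np.sqrt):
        # Interpolate in "forward" space so the property scales linearly there.
        lo, hi = forward(vmin), forward(vmax)
        def mapping(x):
            return inverse(np.multiply(x, hi - lo) + lo)
        return mapping

    mapping = make_interval_mapping(2, 8)
    print(mapping(np.array([0.0, 0.5, 1.0])))
    # [2.  5.830...  8.]: endpoints hit the range; the midpoint is
    # halfway in squared (area) units, not in diameter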
"name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 184, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 185, "name": "variable_type", "kind": "ref", "category": "function", "info": " elif variable_type(data) == \"categorical\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 186, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 187, "name": "variable_type", "kind": "ref", "category": 
"function", "info": " elif variable_type(data) == \"datetime\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 188, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 191, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 193, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 198, "name": "_get_categorical_mapping", "kind": "ref", "category": "function", "info": " return self._get_categorical_mapping(scale, data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 201, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(self.default_range)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 203, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(scale.values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 216, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> 
Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 217, "name": "_inverse", "kind": "ref", "category": "function", "info": " return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 221, "name": "_get_categorical_mapping", "kind": "def", "category": "function", "info": " def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 225, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 228, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 231, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, 
scale.values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 245, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward([vmin, vmax])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 246, "name": "_inverse", "kind": "ref", "category": "function", "info": " values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 248, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 258, "name": "PointSize", "kind": "def", "category": "class", "info": "_forward\t_inverse"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 262, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values):\n \"\"\"Square native values to implement linear scaling of point area.\"\"\"\n return np.square(values)\n\n def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 266, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 271, "name": "LineWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 274, "name": "default_range", "kind": "def", "category": "function", 
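The categorical branch (_get_categorical_mapping) instead assigns the n levels evenly spaced values, laid out from vmax down to vmin, and looks them up by integer level code while passing missing data through as NaN. A minimal sketch (make_nominal_mapping is illustrative; the source casts to np.intp directly, whereas here NaNs are zeroed first before the finite mask is applied):

    import numpy as np

    def make_nominal_mapping(n_levels, vmin=1.0, vmax=3.0):
        values = np.linspace(vmax, vmin, n_levels)  # descending, as above
        def mapping(x):
            x = np.asarray(x, float)
            ixs = np.nan_to_num(x).astype(np.intp)  # safe dummy index for NaN
            out = np.full(len(x), np.nan)
            use = np.isfinite(x)
            out[use] = np.take(values, ixs[use])
            return out
        return mapping

    mapping = make_nominal_mapping(3)
    print(mapping([0, 1, 2, float("nan")]))  # [3. 2. 1. nan]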
"info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 280, "name": "EdgeWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 283, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return 
self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 289, "name": "Stroke", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 294, "name": "Alpha", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 300, "name": "Offset", "kind": "def", "category": "class", "info": ""}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 306, "name": "FontSize", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 311, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n if isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif variable_type(data) == \"categorical\":\n return Nominal(arg)\n elif variable_type(data) == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(\n self, scale: Scale, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 322, "name": 
"ObjectProperty", "kind": "def", "category": "class", "info": "_default_values\tdefault_scale\tinfer_scale\tget_mapping"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 331, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 334, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 335, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 337, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as 
lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 338, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 340, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 345, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 349, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 352, 
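ObjectProperty.get_mapping above applies the same lookup idea to non-numeric values (markers, dash specs): level codes index into a list of objects, and non-finite inputs fall back to a null value. A standalone sketch with hypothetical names:

    import numpy as np

    def make_object_mapping(values, null_value=""):
        def mapping(x):
            ixs = np.nan_to_num(np.asarray(x, float)).astype(np.intp)
            return [
                values[ix] if np.isfinite(x_i) else null_value
                for x_i, ix in zip(x, ixs)
            ]
        return mapping

    mapping = make_object_mapping(["o", "X", "^"])
    print(mapping([0.0, 2.0, float("nan")]))  # ['o', '^', '']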
"name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 354, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(n)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 362, "name": "standardize", "kind": "ref", "category": "function", "info": " values = [self.standardize(x) for x in values]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 364, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 374, "name": "Marker", "kind": "def", "category": "class", "info": "standardize\t_default_values"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 376, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " null_value = MarkerStyle(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 383, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: MarkerPattern) -> MarkerStyle:\n return MarkerStyle(val)\n\n def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing 
order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 384, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return MarkerStyle(val)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 386, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 413, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " markers = [MarkerStyle(m) for m in markers[:n]]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 418, "name": "LineStyle", "kind": "def", "category": "class", "info": "standardize\t_default_values\t_get_dash_pattern"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 422, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: str | DashPattern) -> DashPatternWithOffset:\n return self._get_dash_pattern(val)\n\n def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
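Marker._default_values above starts from a hand-picked set of well-distinguished markers and extends it with regular polygons and stars of increasing order; each (sides, style, angle) tuple is a valid MarkerStyle spec (style 0 is a polygon, 1 a star). Extracted so it runs directly, the generation loop looks like this:

    from matplotlib.markers import MarkerStyle

    markers = [
        "o", "X", (4, 0, 45), "P", (4, 0, 0), (4, 1, 0), "^", (4, 1, 45), "v",
    ]
    n, s = 15, 5
    while len(markers) < n:
        # Half the vertex angle, so rotated and unrotated specs differ.
        a = 360 / (s + 1) / 2
        markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])
        s += 1
    markers = [MarkerStyle(m) for m in markers[:n]]
    print(len(markers))  # 15 unique, all filled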
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 423, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return self._get_dash_pattern(val)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 425, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 466, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return [self._get_dash_pattern(x) for x in dashes]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 469, "name": "_get_dash_pattern", "kind": "def", "category": "function", "info": " def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), 
*ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 515, "name": "TextAlignment", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 519, "name": "HorizontalAlignment", "kind": "def", "category": "class", "info": "_default_values"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 521, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 526, "name": "VerticalAlignment", "kind": "def", "category": "class", "info": "_default_values"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 528, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", 
None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 538, "name": "Color", "kind": "def", "category": "class", "info": "standardize\t_standardize_color_sequence\tinfer_scale\t_get_categorical_mapping\tget_mapping"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 543, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: ColorSpec) -> RGBTuple | RGBATuple:\n # Return color with alpha channel only if the input spec has it\n # This is so that RGBA colors can override the Alpha property\n if to_rgba(val) != to_rgba(val, 1):\n return to_rgba(val)\n else:\n return to_rgb(val)\n\n def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. 
datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 551, 
"name": "_standardize_color_sequence", "kind": "def", "category": "function", "info": " def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return 
self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 553, "name": "has_alpha", "kind": "def", "category": "function", "info": " def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"categorical\")\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n # TODO what about\n # - Temporal? (i.e. 
datetime)\n # - Boolean?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 559, 
"name": "has_alpha", "kind": "ref", "category": "function", "info": " needs_alpha = any(has_alpha(x) for x in colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 566, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 571, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"categorical\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 574, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 581, "name": 
"Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 582, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 585, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 601, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 603, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 606, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 608, "name": "_get_categorical_mapping", "kind": "def", "category": "function", "info": " def _get_categorical_mapping(self, scale, data):\n \"\"\"Define mapping as lookup in list of discrete color values.\"\"\"\n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_color_sequence(colors)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_categorical_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will 
strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 610, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 615, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 619, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 621, "name": "blend_palette", "kind": "ref", "category": "function", "info": " colors = blend_palette(values, n)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 623, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(values, n)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 625, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n <= len(get_color_cycle()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 627, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 629, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 639, "name": "_standardize_color_sequence", "kind": "ref", "category": "function", "info": " colors = self._standardize_color_sequence(colors)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 641, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 650, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 657, "name": "_get_categorical_mapping", "kind": "ref", "category": "function", "info": " return self._get_categorical_mapping(scale, data)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 661, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(\"ch:\", as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 665, "name": "blend_palette", "kind": "ref", "category": "function", "info": " mapping = blend_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 670, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 681, "name": "_mapping", "kind": "def", "category": "function", "info": " def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 685, "name": "mapping", "kind": "ref", "category": "function", "info": " out = mapping(x)[:, :3]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 697, "name": "Fill", "kind": "def", "category": "class", "info": "standardize\t_default_values\tdefault_scale\tinfer_scale\tget_mapping"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 706, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> bool:\n return bool(val)\n\n def _default_values(self, n: int) -> list:\n \"\"\"Return a list of n values, alternating True and False.\"\"\"\n if n > 2:\n msg = \" \".join([\n f\"The variable assigned to {self.variable} has more than two levels,\",\n f\"so {self.variable} values will cycle and may be uninterpretable\",\n ])\n # TODO fire in a \"nice\" way (see above)\n warnings.warn(msg, UserWarning)\n return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]\n\n def default_scale(self, data: Series) -> Nominal:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO infer Boolean where possible?\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps each data value to True or False.\"\"\"\n # TODO categorical_order is going to return [False, True] for booleans,\n # and [0, 1] for binary, but the default values order is [True, False].\n # We should special case this to handle it properly, or change\n # categorical_order to not \"sort\" booleans. 
Note that we need to sync with\n # what's going to happen upstream in the scale, so we can't just do it here.\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n\n if isinstance(scale.values, list):\n values = [bool(x) for x in scale.values]\n elif isinstance(scale.values, dict):\n values = [bool(scale.values[x]) for x in levels]\n elif scale.values is None:\n values = self._default_values(len(levels))\n else:\n msg = \" \".join([\n f\"Scale values for {self.variable} must be passed in\",\n f\"a list or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else False\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 709, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 720, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Nominal:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Nominal:\n return Nominal(arg)\n\n def get_mapping(\n self, scale: Scale, data: Series,\n ) -> Callable[[ArrayLike], list]:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n order = getattr(scale, \"order\", None)\n levels = categorical_order(data, order)\n n = len(levels)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 722, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 724, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 727, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 729, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(\n self, scale: Scale, data: Series\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Return a function that 
maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 739, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 746, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(len(levels))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 754, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_categorical_mapping(\n self, scale: Nominal, data: ArrayLike\n ) -> Callable[[ArrayLike], ArrayLike]:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = self._inverse(np.linspace(vmax, vmin, len(levels)))\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 802, 
"name": "cls", "kind": "ref", "category": "function", "info": "PROPERTIES = {var: cls(var) for var, cls in PROPERTY_CLASSES.items()}\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 16, "name": "VarType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 37, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(\n vector: Series,\n boolean_type: Literal[\"numeric\", \"categorical\"] = \"numeric\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 65, "name": "is_categorical_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_categorical_dtype(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 66, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 70, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 86, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(boolean_type)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 89, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 90, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 92, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 93, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 99, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VarType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", 
"rel_fname": "seaborn/_core/rules.py", "line": 105, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 106, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 110, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 116, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 117, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 121, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 124, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector: Series, order: list | None = None) -> list:\n \"\"\"\n Return a list of unique data values using seaborn's ordering rules.\n\n Parameters\n ----------\n vector : Series\n Vector of \"categorical\" values\n order : list\n Desired order of category levels to override the order determined\n from the `data` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is not None:\n return order\n\n if vector.dtype.name == \"category\":\n order = list(vector.cat.categories)\n else:\n order = list(filter(pd.notnull, vector.unique()))\n if variable_type(pd.Series(order)) == \"numeric\":\n order.sort()\n\n return order\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 149, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(pd.Series(order)) == \"numeric\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 53, "name": "Scale", "kind": "def", "category": "class", "info": "__post_init__\ttick\tlabel\t_get_locators\t_get_formatter\t_get_scale\t_spacing\t_setup\t__call__\t_identity"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 64, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n self._tick_params = None\n self._label_params = None\n self._legend = None\n\n def tick(self):\n raise 
NotImplementedError()\n\n def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 70, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self):\n raise NotImplementedError()\n\n def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 73, "name": "label", "kind": "def", "category": "function", "info": " def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 76, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 79, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 82, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 84, "name": "_get_locators", "kind": "ref", "category": "function", "info": " major_locator, minor_locator = self._get_locators(**self._tick_params)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 85, "name": "_get_formatter", "kind": "ref", "category": "function", "info": " major_formatter = self._get_formatter(major_locator, **self._label_params)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 87, "name": "InternalScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 94, "name": "InternalScale", "kind": "ref", "category": "function", "info": " return InternalScale(name, (forward, inverse))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 96, "name": "_spacing", 
"kind": "def", "category": "function", "info": " def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 97, "name": "_spacer", "kind": "ref", "category": "function", "info": " space = self._spacer(x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 104, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 123, "name": "func", "kind": "ref", "category": "function", "info": " trans_data = func(trans_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 131, "name": "_identity", "kind": "def", "category": "function", "info": " def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 133, "name": "Identity", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 139, "name": "Identity", "kind": "ref", "category": "function", "info": " return Identity()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 143, "name": "Nominal", "kind": "def", "category": "class", "info": "_setup\ttick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 154, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 160, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 162, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 167, "name": "categorical_order", "kind": "ref", "category": "function", "info": " units_seed = categorical_order(data, new.order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 181, "name": "CatScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 191, "name": "CatScale", "kind": "ref", "category": "function", "info": " mpl_scale = CatScale(data.name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 193, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 205, "name": "stringify", "kind": "ref", "category": "function", "info": " axis.update_units(stringify(np.array(units_seed)))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 216, "name": "stringify", "kind": "ref", "category": "function", "info": " out[keep] = axis.convert_units(stringify(x[keep]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 221, "name": "get_mapping", "kind": "ref", "category": "function", "info": " prop.get_mapping(new, data),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 225, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n return 1\n\n new._spacer = spacer\n\n if prop.legend:\n new._legend = units_seed, list(stringify(units_seed))\n\n return new\n\n def tick(self, locator: Locator | None = None):\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n }\n return new\n\n def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 231, "name": "stringify", "kind": "ref", "category": "function", "info": " new._legend = units_seed, list(stringify(units_seed))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 235, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self, locator: Locator | None = None):\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n }\n return new\n\n def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 259, "name": "label", "kind": "def", "category": "function", "info": " def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 284, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 289, "name": "StrCategoryLocator", "kind": "ref", "category": "function", "info": " locator = mpl.category.StrCategoryLocator({})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 293, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 298, "name": "StrCategoryFormatter", "kind": "ref", "category": "function", "info": " formatter = mpl.category.StrCategoryFormatter({})\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 304, "name": "Ordinal", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 310, "name": "Discrete", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 316, "name": "ContinuousBase", "kind": "def", "category": "class", "info": "_setup\t_get_transform"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 321, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 327, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 329, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 331, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = new._get_transform()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 333, "name": "_get_scale", "kind": "ref", "category": "function", "info": " mpl_scale = new._get_scale(str(data.name), forward, inverse)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 336, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 349, "name": "forward", "kind": "ref", "category": "function", "info": " a = forward(vmin)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 350, "name": "forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 350, "name": 
"forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 352, "name": "normalize", "kind": "def", "category": "function", "info": " def normalize(x):\n return (x - a) / b\n\n else:\n normalize = vmin = vmax = None\n\n new._pipeline = [\n axis.convert_units,\n forward,\n normalize,\n prop.get_mapping(new, data)\n ]\n\n def spacer(x):\n x = x.dropna().unique()\n if len(x) < 2:\n return np.nan\n return np.min(np.diff(np.sort(x)))\n new._spacer = spacer\n\n # TODO How to allow disabling of legend for all uses of property?\n # Could add a Scale parameter, or perhaps Scale.suppress()?\n # Are there other useful parameters that would be in Scale.legend()\n # besides allowing Scale.legend(False)?\n if prop.legend:\n axis.set_view_interval(vmin, vmax)\n locs = axis.major.locator()\n locs = locs[(vmin <= locs) & (locs <= vmax)]\n # Avoid having an offset / scientific notation in a legend\n # as we don't represent that anywhere so it ends up incorrect.\n # This could become an option (e.g. Continuous.label(offset=True))\n # in which case we would need to figure out how to show it.\n if hasattr(axis.major.formatter, \"set_useOffset\"):\n axis.major.formatter.set_useOffset(False)\n if hasattr(axis.major.formatter, \"set_scientific\"):\n axis.major.formatter.set_scientific(False)\n labels = axis.major.formatter.format_ticks(locs)\n new._legend = list(locs), list(labels)\n\n return new\n\n def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 362, "name": "get_mapping", "kind": "ref", "category": "function", "info": " prop.get_mapping(new, data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 365, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n return 1\n\n new._spacer = spacer\n\n if prop.legend:\n new._legend = units_seed, list(stringify(units_seed))\n\n return new\n\n def tick(self, locator: Locator | None = None):\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n }\n return new\n\n def label(self, formatter: Formatter | None = None):\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n }\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 378, "name": "locator", "kind": "ref", "category": "function", "info": " locs = axis.major.locator()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 393, "name": "_get_transform", "kind": "def", "category": "function", "info": " def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 397, "name": "get_param", "kind": "def", "category": "function", "info": " def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif 
arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 403, "name": "_make_identity_transforms", "kind": "ref", "category": "function", "info": " return _make_identity_transforms()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 408, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 410, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"logit\", 10)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 411, "name": "_make_logit_transforms", "kind": "ref", "category": "function", "info": " return _make_logit_transforms(base)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 413, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"log\", 10)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 414, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms(base)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 416, "name": "get_param", "kind": "ref", "category": "function", "info": " c = get_param(\"symlog\", 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 417, "name": "_make_symlog_transforms", "kind": "ref", "category": "function", "info": " return _make_symlog_transforms(c)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 419, "name": "get_param", "kind": "ref", "category": "function", "info": " exp = get_param(\"pow\", 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 420, "name": "_make_power_transforms", "kind": "ref", "category": "function", "info": " return _make_power_transforms(exp)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 422, "name": "_make_sqrt_transforms", "kind": "ref", "category": "function", "info": " return _make_sqrt_transforms()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 428, "name": "Continuous", "kind": "def", "category": "class", "info": "tick\tlabel\t_parse_for_log_params\t_get_locators\t_get_formatter"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 440, "name": "tick", 
"kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] | None = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n # Input checks\n if locator is not None and not isinstance(locator, Locator):\n raise TypeError(\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError(\"`count` requires `between` with log transform.\")\n if every is not None:\n raise RuntimeError(\"`every` not supported with log transform.\")\n\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n \"at\": at,\n \"upto\": upto,\n \"count\": count,\n \"every\": every,\n \"between\": between,\n \"minor\": minor,\n }\n return new\n\n def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). 
When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 482, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 501, "name": "label", "kind": "def", "category": "function", "info": " def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = 
FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 552, "name": "_parse_for_log_params", "kind": "def", "category": "function", "info": " def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def 
_get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 558, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^log(\\d*)\", trans)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 561, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"symlog(\\d*)\", trans)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 566, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n 
else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 568, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 585, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = self._get_transform()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 586, "name": "forward", "kind": "ref", "category": "function", "info": " lo, hi = forward(between)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 587, "name": "inverse", "kind": "ref", "category": "function", "info": " ticks = inverse(np.linspace(lo, hi, num=count))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 622, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 624, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 663, "name": "Temporal", "kind": "def", "category": "class", "info": "tick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 681, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] | None = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = 
None,\n ) -> Continuous:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n # Input checks\n if locator is not None and not isinstance(locator, Locator):\n raise TypeError(\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError(\"`count` requires `between` with log transform.\")\n if every is not None:\n raise RuntimeError(\"`every` not supported with log transform.\")\n\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n \"at\": at,\n \"upto\": upto,\n \"count\": count,\n \"every\": every,\n \"between\": between,\n \"minor\": minor,\n }\n return new\n\n def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). 
When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 715, "name": "label", "kind": "def", "category": "function", "info": " def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable that consumes a number\n and returns a string.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else 
None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 744, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator, upto):\n\n if locator is not None:\n major_locator = locator\n elif upto is not None:\n major_locator = AutoDateLocator(minticks=2, maxticks=upto)\n\n else:\n major_locator = AutoDateLocator(minticks=2, maxticks=6)\n minor_locator = None\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, concise):\n\n if formatter is not None:\n return formatter\n\n if concise:\n # TODO ideally we would have concise coordinate ticks,\n # but full semantic ticks. Is that possible?\n formatter = ConciseDateFormatter(locator)\n else:\n formatter = AutoDateFormatter(locator)\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 757, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter, concise):\n\n if formatter is not None:\n return formatter\n\n if concise:\n # TODO ideally we would have concise coordinate ticks,\n # but full semantic ticks. 
Is that possible?\n formatter = ConciseDateFormatter(locator)\n else:\n formatter = AutoDateFormatter(locator)\n\n return formatter\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 790, "name": "PseudoAxis", "kind": "def", "category": "class", "info": "__init__\tset_view_interval\tget_view_interval\tset_data_interval\tget_data_interval\tget_tick_space\tset_major_locator\tset_major_formatter\tset_minor_locator\tset_minor_formatter\tset_units\tupdate_units\tconvert_units\tget_scale\tget_majorticklocs"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 807, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.major = mpl.axis.Ticker()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 808, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.minor = mpl.axis.Ticker()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 860, "name": "get_converter", "kind": "ref", "category": "function", "info": " self.converter = mpl.units.registry.get_converter(x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 862, "name": "default_units", "kind": "ref", "category": "function", "info": " self.converter.default_units(x, self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 864, "name": "axisinfo", "kind": "ref", "category": "function", "info": " info = self.converter.axisinfo(self.units, self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 882, "name": "convert", "kind": "ref", "category": "function", "info": " return self.converter.convert(x, self.units, self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 893, "name": "locator", "kind": "ref", "category": "function", "info": " return self.major.locator()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 900, "name": "_make_identity_transforms", "kind": "def", "category": "function", "info": "def _make_identity_transforms() -> TransFuncs:\n\n def identity(x):\n return x\n\n return identity, identity\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 908, "name": "_make_logit_transforms", "kind": "def", "category": "function", "info": "def _make_logit_transforms(base: float | None = None) -> TransFuncs:\n\n log, exp = _make_log_transforms(base)\n\n def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 910, "name": "_make_log_transforms", "kind": 
"ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 912, "name": "logit", "kind": "def", "category": "function", "info": " def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 916, "name": "expit", "kind": "def", "category": "function", "info": " def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 923, "name": "_make_log_transforms", "kind": "def", "category": "function", "info": "def _make_log_transforms(base: float | None = None) -> TransFuncs:\n\n fs: TransFuncs\n if base is None:\n fs = np.log, np.exp\n elif base == 2:\n fs = np.log2, partial(np.power, 2)\n elif base == 10:\n fs = np.log10, partial(np.power, 10)\n else:\n def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 933, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 948, "name": "_make_symlog_transforms", "kind": "def", "category": "function", "info": "def _make_symlog_transforms(c: float = 1, base: float = 10) -> TransFuncs:\n\n # From https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001\n\n # Note: currently not using base because we only get\n # one parameter from the string, and are using c (this is consistent with d3)\n\n log, exp = _make_log_transforms(base)\n\n def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 955, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 957, 
"name": "symlog", "kind": "def", "category": "function", "info": " def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 961, "name": "symexp", "kind": "def", "category": "function", "info": " def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 968, "name": "_make_sqrt_transforms", "kind": "def", "category": "function", "info": "def _make_sqrt_transforms() -> TransFuncs:\n\n def sqrt(x):\n return np.sign(x) * np.sqrt(np.abs(x))\n\n def square(x):\n return np.sign(x) * np.square(x)\n\n return sqrt, square\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 979, "name": "_make_power_transforms", "kind": "def", "category": "function", "info": "def _make_power_transforms(exp: float) -> TransFuncs:\n\n def forward(x):\n return np.sign(x) * np.power(np.abs(x), exp)\n\n def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 981, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 984, "name": "inverse", "kind": "def", "category": "function", "info": " def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 15, "name": "Subplots", "kind": "def", "category": "class", "info": "__init__\t_check_dimension_uniqueness\t_determine_grid_dimensions\t_handle_wrapping\t_determine_axis_sharing\tinit_figure\t__iter__\t__len__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 40, "name": "_check_dimension_uniqueness", "kind": "ref", "category": "function", "info": " self._check_dimension_uniqueness(facet_spec, pair_spec)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 41, "name": "_determine_grid_dimensions", "kind": "ref", "category": "function", "info": " self._determine_grid_dimensions(facet_spec, pair_spec)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": 
"seaborn/_core/subplots.py", "line": 42, "name": "_handle_wrapping", "kind": "ref", "category": "function", "info": " self._handle_wrapping(facet_spec, pair_spec)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 43, "name": "_determine_axis_sharing", "kind": "ref", "category": "function", "info": " self._determine_axis_sharing(pair_spec)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 45, "name": "_check_dimension_uniqueness", "kind": "def", "category": "function", "info": " def _check_dimension_uniqueness(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Reject specs that pair and facet on (or wrap to) same figure dimension.\"\"\"\n err = None\n\n facet_vars = facet_spec.get(\"variables\", {})\n\n if facet_spec.get(\"wrap\") and {\"col\", \"row\"} <= set(facet_vars):\n err = \"Cannot wrap facets when specifying both `col` and `row`.\"\n elif (\n pair_spec.get(\"wrap\")\n and pair_spec.get(\"cross\", True)\n and len(pair_spec.get(\"structure\", {}).get(\"x\", [])) > 1\n and len(pair_spec.get(\"structure\", {}).get(\"y\", [])) > 1\n ):\n err = \"Cannot wrap subplots when pairing on both `x` and `y`.\"\n\n collisions = {\"x\": [\"columns\", \"rows\"], \"y\": [\"rows\", \"columns\"]}\n for pair_axis, (multi_dim, wrap_dim) in collisions.items():\n if pair_axis not in pair_spec.get(\"structure\", {}):\n continue\n elif multi_dim[:3] in facet_vars:\n err = f\"Cannot facet the {multi_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and facet_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and pair_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {multi_dim} while faceting the {wrap_dim}.\"\n\n if err is not None:\n raise RuntimeError(err) # TODO what err class? 
Define PlotSpecError?\n\n def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n 
else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 77, "name": "_determine_grid_dimensions", "kind": "def", "category": "function", "info": " def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) 
-> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = 
{\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 101, "name": "_handle_wrapping", "kind": "def", "category": "function", "info": " def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], 
self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 120, "name": "_determine_axis_sharing", "kind": "def", "category": "function", "info": " def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update 
subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if 
not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 141, "name": "init_figure", "kind": "def", "category": "function", "info": " def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) 
== self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/typing.py", "rel_fname": "seaborn/_core/typing.py", "line": 33, "name": "Default", "kind": "def", "category": "class", "info": "__repr__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_core/typing.py", "rel_fname": "seaborn/_core/typing.py", "line": 38, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_decorators.py", "rel_fname": "seaborn/_decorators.py", "line": 3, "name": "share_init_params_with_map", "kind": "def", "category": "function", "info": "def share_init_params_with_map(cls):\n \"\"\"Make cls.map a classmethod with same signature as cls.__init__.\"\"\"\n map_sig = signature(cls.map)\n init_sig = signature(cls.__init__)\n\n new = [v for k, v in init_sig.parameters.items() if k != \"self\"]\n new.insert(0, map_sig.parameters[\"cls\"])\n cls.map.__signature__ = map_sig.replace(parameters=new)\n cls.map.__doc__ = cls.__init__.__doc__\n\n cls.map = classmethod(cls.map)\n\n return cls\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 5, "name": "DocstringComponents", "kind": "def", "category": "class", "info": "__init__\t__getattr__\tfrom_nested_components\tfrom_function_params"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 18, "name": "group", "kind": "ref", "category": "function", "info": " entries[key] = m.group(1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 24, "name": "__getattr__", "kind": "def", "category": "function", "info": " def __getattr__(self, attr):\n \"\"\"Provide dot access to entries for clean raw docstrings.\"\"\"\n if attr in self.entries:\n return self.entries[attr]\n else:\n try:\n return self.__getattribute__(attr)\n except AttributeError as err:\n # If Python is run with -OO, it will strip docstrings and our lookup\n # from self.entries will fail. 
We check for __debug__, which is actually\n # set to False by -O (it is True for normal execution).\n # But we only want to see an error when building the docs;\n # not something users should see, so this slight inconsistency is fine.\n if __debug__:\n raise err\n else:\n pass\n\n @classmethod\n def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 43, "name": "from_nested_components", "kind": "def", "category": "function", "info": " def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 45, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(kwargs, strip_whitespace=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 48, "name": "from_function_params", "kind": "def", "category": "function", "info": " def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 50, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 58, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(comp_dict)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 194, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " params=DocstringComponents(_core_params),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 195, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " returns=DocstringComponents(_core_returns),\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 196, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " seealso=DocstringComponents(_seealso_blurbs),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 20, "name": "AreaBase", "kind": "def", "category": "class", "info": "_plot\t_standardize_coordinate_parameters\t_postprocess_artist\t_get_verts\t_legend_artist"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 22, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n patches = defaultdict(list)\n\n for keys, data, ax in split_gen():\n\n kws = {}\n data = self._standardize_coordinate_parameters(data, orient)\n resolved = resolve_properties(self, keys, scales)\n verts = self._get_verts(data, orient)\n ax.update_datalim(verts)\n\n # TODO should really move this logic into resolve_color\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n kws[\"facecolor\"] = fc\n kws[\"edgecolor\"] = resolve_color(self, keys, \"edge\", scales)\n kws[\"linewidth\"] = resolved[\"edgewidth\"]\n kws[\"linestyle\"] = resolved[\"edgestyle\"]\n\n patches[ax].append(mpl.patches.Polygon(verts, **kws))\n\n for ax, ax_patches in patches.items():\n\n for patch in ax_patches:\n self._postprocess_artist(patch, ax, orient)\n ax.add_patch(patch)\n\n def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 26, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 29, "name": "_standardize_coordinate_parameters", "kind": "ref", "category": "function", "info": " data = self._standardize_coordinate_parameters(data, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 30, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 
31, "name": "_get_verts", "kind": "ref", "category": "function", "info": " verts = self._get_verts(data, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 32, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(verts)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 35, "name": "resolve_color", "kind": "ref", "category": "function", "info": " fc = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 37, "name": "to_rgba", "kind": "ref", "category": "function", "info": " fc = mpl.colors.to_rgba(fc, 0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 40, "name": "resolve_color", "kind": "ref", "category": "function", "info": " kws[\"edgecolor\"] = resolve_color(self, keys, \"edge\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 44, "name": "Polygon", "kind": "ref", "category": "function", "info": " patches[ax].append(mpl.patches.Polygon(verts, **kws))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 49, "name": "_postprocess_artist", "kind": "ref", "category": "function", "info": " self._postprocess_artist(patch, ax, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 50, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(patch)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 52, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 55, "name": "_postprocess_artist", "kind": "def", "category": "function", "info": " def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = 
np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 58, "name": "_get_verts", "kind": "def", "category": "function", "info": " def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 61, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient, kind=\"mergesort\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 63, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}min\"]].to_numpy(),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 64, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 70, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 73, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 75, 
"name": "resolve_color", "kind": "ref", "category": "function", "info": " fc = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 77, "name": "to_rgba", "kind": "ref", "category": "function", "info": " fc = mpl.colors.to_rgba(fc, 0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 79, "name": "Patch", "kind": "ref", "category": "function", "info": " return mpl.patches.Patch(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 81, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edgecolor=resolve_color(self, keys, \"edge\", scales),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 90, "name": "Area", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters\t_postprocess_artist"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 103, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 104, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 105, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 106, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 107, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 108, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 109, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 112, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 114, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, 
data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 116, "name": "rename", "kind": "ref", "category": "function", "info": " return data.rename(columns={\"baseline\": f\"{dv}min\", dv: f\"{dv}max\"})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 118, "name": "_postprocess_artist", "kind": "def", "category": "function", "info": " def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 123, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " artist.set_linewidth(artist.get_linewidth() * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 123, "name": "get_linewidth", "kind": "ref", "category": "function", "info": " artist.set_linewidth(artist.get_linewidth() * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 125, "name": "get_linestyle", "kind": "ref", "category": "function", "info": " linestyle = artist.get_linestyle()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 128, "name": "set_linestyle", "kind": "ref", "category": "function", "info": " artist.set_linestyle(linestyle)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " 
artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "get_path", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "get_transform", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 132, "name": "set_clip_box", "kind": "ref", "category": "function", "info": " artist.set_clip_box(ax.bbox)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 140, "name": "Band", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 153, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 154, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 155, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 156, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 157, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 158, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(0, )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 159, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableFloat = Mappable(\"-\", )\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 161, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n 
data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 168, "name": "groupby", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 168, "name": "agg", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 168, "name": "reset_index", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 27, "name": "BarBase", "kind": "def", "category": "class", "info": "_make_patches\t_resolve_properties\t_legend_artist"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 29, "name": "_make_patches", "kind": "def", "category": "function", "info": " def _make_patches(self, data, scales, orient):\n\n kws = self._resolve_properties(data, scales)\n if orient == \"x\":\n kws[\"x\"] = (data[\"x\"] - data[\"width\"] / 2).to_numpy()\n kws[\"y\"] = data[\"baseline\"].to_numpy()\n kws[\"w\"] = data[\"width\"].to_numpy()\n kws[\"h\"] = (data[\"y\"] - data[\"baseline\"]).to_numpy()\n else:\n kws[\"x\"] = data[\"baseline\"].to_numpy()\n kws[\"y\"] = (data[\"y\"] - data[\"width\"] / 2).to_numpy()\n kws[\"w\"] = (data[\"x\"] - data[\"baseline\"]).to_numpy()\n kws[\"h\"] = data[\"width\"].to_numpy()\n\n kws.pop(\"width\", None)\n kws.pop(\"baseline\", None)\n\n val_dim = {\"x\": \"h\", \"y\": \"w\"}[orient]\n bars, vals = [], []\n\n for i in range(len(data)):\n\n row = {k: v[i] for k, v in kws.items()}\n\n # Skip bars with no value. 
It's possible we'll want to make this\n # an option (i.e. so you have an artist for animating or annotating),\n # but let's keep things simple for now.\n if not np.nan_to_num(row[val_dim]):\n continue\n\n bar = mpl.patches.Rectangle(\n xy=(row[\"x\"], row[\"y\"]),\n width=row[\"w\"],\n height=row[\"h\"],\n facecolor=row[\"facecolor\"],\n edgecolor=row[\"edgecolor\"],\n linestyle=row[\"edgestyle\"],\n linewidth=row[\"edgewidth\"],\n **self.artist_kws,\n )\n bars.append(bar)\n vals.append(row[val_dim])\n\n return bars, vals\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n\n resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 31, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " kws = self._resolve_properties(data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 33, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"x\"] = (data[\"x\"] - data[\"width\"] / 2).to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 34, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"y\"] = data[\"baseline\"].to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 35, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"w\"] = data[\"width\"].to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 36, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"h\"] = (data[\"y\"] - data[\"baseline\"]).to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 38, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"x\"] = data[\"baseline\"].to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 39, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"y\"] = (data[\"y\"] - data[\"width\"] / 2).to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 40, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"w\"] = 
(data[\"x\"] - data[\"baseline\"]).to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 41, "name": "to_numpy", "kind": "ref", "category": "function", "info": " kws[\"h\"] = data[\"width\"].to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 59, "name": "Rectangle", "kind": "ref", "category": "function", "info": " bar = mpl.patches.Rectangle(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 74, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n\n resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 76, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 78, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 79, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 90, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 95, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " key = self._resolve_properties(key, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", 
"rel_fname": "seaborn/_marks/bar.py", "line": 96, "name": "Patch", "kind": "ref", "category": "function", "info": " artist = mpl.patches.Patch(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 107, "name": "Bar", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 120, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 121, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 122, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 123, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 124, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 125, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 126, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 129, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(.8, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 130, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this mappable?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 132, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n val_idx = [\"y\", \"x\"].index(orient)\n\n for _, data, ax in split_gen():\n\n bars, vals = self._make_patches(data, scales, orient)\n\n for bar in bars:\n\n # Because we are clipping the artist (see below), the edges end up\n # looking half as wide as they actually are. I don't love this clumsy\n # workaround, which is going to cause surprises if you work with the\n # artists directly. 
We may need to revisit after feedback.\n bar.set_linewidth(bar.get_linewidth() * 2)\n linestyle = bar.get_linestyle()\n if linestyle[1]:\n linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))\n bar.set_linestyle(linestyle)\n\n # This is a bit of a hack to handle the fact that the edge lines are\n # centered on the actual extents of the bar, and overlap when bars are\n # stacked or dodged. We may discover that this causes problems and needs\n # to be revisited at some point. Also it should be faster to clip with\n # a bbox than a path, but I can't work out how to get the intersection\n # with the axes bbox.\n bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n if self.artist_kws.get(\"clip_on\", True):\n # It seems the above hack undoes the default axes clipping\n bar.set_clip_box(ax.bbox)\n bar.sticky_edges[val_idx][:] = (0, np.inf)\n ax.add_patch(bar)\n\n # Add a container which is useful for, e.g. Axes.bar_label\n if Version(mpl.__version__) >= Version(\"3.4.0\"):\n orientation = {\"x\": \"vertical\", \"y\": \"horizontal\"}[orient]\n container_kws = dict(datavalues=vals, orientation=orientation)\n else:\n container_kws = {}\n container = mpl.container.BarContainer(bars, **container_kws)\n ax.add_container(container)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 136, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 138, "name": "_make_patches", "kind": "ref", "category": "function", "info": " bars, vals = self._make_patches(data, scales, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 146, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " bar.set_linewidth(bar.get_linewidth() * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 146, "name": "get_linewidth", "kind": "ref", "category": "function", "info": " bar.set_linewidth(bar.get_linewidth() * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 147, "name": "get_linestyle", "kind": "ref", "category": "function", "info": " linestyle = bar.get_linestyle()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 150, "name": "set_linestyle", "kind": "ref", "category": "function", "info": " bar.set_linestyle(linestyle)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 158, "name": "get_path", "kind": "ref", "category": "function", "info": " bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 163, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(bar)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 166, "name": "Version", "kind": "ref", "category": "function", 
"info": " if Version(mpl.__version__) >= Version(\"3.4.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 166, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.4.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 171, "name": "BarContainer", "kind": "ref", "category": "function", "info": " container = mpl.container.BarContainer(bars, **container_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 172, "name": "add_container", "kind": "ref", "category": "function", "info": " ax.add_container(container)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 177, "name": "Bars", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 190, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 191, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 192, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 193, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(rc=\"patch.edgecolor\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 194, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 195, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(auto=True, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 196, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 199, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 200, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this 
mappable?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 202, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n val_idx = [\"y\", \"x\"].index(orient)\n\n for _, data, ax in split_gen():\n\n bars, vals = self._make_patches(data, scales, orient)\n\n for bar in bars:\n\n # Because we are clipping the artist (see below), the edges end up\n # looking half as wide as they actually are. I don't love this clumsy\n # workaround, which is going to cause surprises if you work with the\n # artists directly. We may need to revisit after feedback.\n bar.set_linewidth(bar.get_linewidth() * 2)\n linestyle = bar.get_linestyle()\n if linestyle[1]:\n linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))\n bar.set_linestyle(linestyle)\n\n # This is a bit of a hack to handle the fact that the edge lines are\n # centered on the actual extents of the bar, and overlap when bars are\n # stacked or dodged. We may discover that this causes problems and needs\n # to be revisited at some point. Also it should be faster to clip with\n # a bbox than a path, but I can't work out how to get the intersection\n # with the axes bbox.\n bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n if self.artist_kws.get(\"clip_on\", True):\n # It seems the above hack undoes the default axes clipping\n bar.set_clip_box(ax.bbox)\n bar.sticky_edges[val_idx][:] = (0, np.inf)\n ax.add_patch(bar)\n\n # Add a container which is useful for, e.g. Axes.bar_label\n if Version(mpl.__version__) >= Version(\"3.4.0\"):\n orientation = {\"x\": \"vertical\", \"y\": \"horizontal\"}[orient]\n container_kws = dict(datavalues=vals, orientation=orientation)\n else:\n container_kws = {}\n container = mpl.container.BarContainer(bars, **container_kws)\n ax.add_container(container)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 208, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 209, "name": "_make_patches", "kind": "ref", "category": "function", "info": " bars, _ = self._make_patches(data, scales, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 215, "name": "PatchCollection", "kind": "ref", "category": "function", "info": " col = mpl.collections.PatchCollection(ax_patches, match_original=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 217, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(col, autolim=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 223, "name": "get_paths", "kind": "ref", "category": "function", "info": " xys = np.vstack([path.vertices for path in col.get_paths()])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 224, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xys)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 229, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 231, "name": "get_dimensions", "kind": "def", "category": "function", "info": " def get_dimensions(collection):\n edges, widths = [], []\n for verts in (path.vertices for path in collection.get_paths()):\n edges.append(min(verts[:, ori_idx]))\n widths.append(np.ptp(verts[:, ori_idx]))\n return np.array(edges), np.array(widths)\n\n min_width = np.inf\n for ax, col in collections.items():\n edges, widths = get_dimensions(col)\n points = 72 / ax.figure.dpi * abs(\n ax.transData.transform([edges + widths] * 2)\n - ax.transData.transform([edges] * 2)\n )\n min_width = min(min_width, min(points[:, ori_idx]))\n\n linewidth = min(.1 * min_width, mpl.rcParams[\"patch.linewidth\"])\n for _, col in collections.items():\n col.set_linewidth(linewidth)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 233, "name": "get_paths", "kind": "ref", "category": "function", "info": " for verts in (path.vertices for path in collection.get_paths()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 240, "name": "get_dimensions", "kind": "ref", "category": "function", "info": " edges, widths = get_dimensions(col)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 242, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([edges + widths] * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 243, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([edges] * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 249, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " col.set_linewidth(linewidth)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 25, "name": "Mappable", "kind": "def", "category": "class", "info": "__init__\t__repr__\tdepend\tgrouping\tdefault"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 77, "name": "depend", "kind": "def", "category": "function", "info": " def depend(self) -> Any:\n \"\"\"Return the name of the feature to source a default value from.\"\"\"\n return self._depend\n\n @property\n def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 82, "name": "grouping", "kind": "def", "category": "function", "info": " def grouping(self) -> bool:\n 
return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 86, "name": "default", "kind": "def", "category": "function", "info": " def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 103, "name": "Mark", "kind": "def", "category": "class", "info": "_mappable_props\t_grouping_props\t_resolve\t_infer_orient\t_plot\t_legend_artist"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 109, "name": "_mappable_props", "kind": "def", "category": "function", "info": " def _mappable_props(self):\n return {\n f.name: getattr(self, f.name) for f in fields(self)\n if isinstance(f.default, Mappable)\n }\n\n @property\n def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would an extender ever need to call it directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? 
Just don't add a scale?\n feature = data[name]\n else:\n scale = scales[name]\n value = data[name]\n try:\n feature = scale(value)\n except Exception as err:\n raise PlotSpecError._during(\"Scaling operation\", name) from err\n\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 116, "name": "_grouping_props", "kind": "def", "category": "function", "info": " def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would an extender ever need to call it directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. 
for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n scale = scales[name]\n value = data[name]\n try:\n feature = scale(value)\n except Exception as err:\n raise PlotSpecError._during(\"Scaling operation\", name) from err\n\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 125, "name": "_resolve", "kind": "def", "category": "function", "info": " def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. 
for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n scale = scales[name]\n value = data[name]\n try:\n feature = scale(value)\n except Exception as err:\n raise PlotSpecError._during(\"Scaling operation\", name) from err\n\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 151, "name": "Property", "kind": "ref", "category": "function", "info": " prop = PROPERTIES.get(name, Property(name))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 163, "name": "standardize", "kind": "ref", "category": "function", "info": " feature = prop.standardize(feature)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 178, "name": "scale", "kind": "ref", "category": "function", "info": " feature = scale(value)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 180, "name": "_during", "kind": "ref", "category": "function", "info": " raise PlotSpecError._during(\"Scaling operation\", name) from err\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 189, "name": "_resolve", "kind": "ref", "category": "function", "info": " return self._resolve(data, feature.depend, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 191, "name": "standardize", "kind": "ref", "category": "function", "info": " default = prop.standardize(feature.default)\n"}, {"fname": 
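The `_resolve` method captured above is a four-step cascade: a directly specified value wins, then a data column (passed through its scale when one exists), then a `depend` redirect to another property, then the standardized default. A minimal standalone sketch of the same cascade, using a hypothetical `MappableStub` rather than seaborn's real classes:

    from dataclasses import dataclass
    from typing import Any

    @dataclass
    class MappableStub:
        default: Any = None
        depend: Any = None

    def resolve(props: dict, data: dict, name: str, scales: dict) -> Any:
        feature = props[name]
        if not isinstance(feature, MappableStub):   # 1. directly specified value wins
            return feature
        if name in data:                            # 2. mapped from data, scaled if possible
            scale = scales.get(name, lambda v: v)
            return scale(data[name])
        if feature.depend is not None:              # 3. borrow another property's value
            return resolve(props, data, feature.depend, scales)
        return feature.default                      # 4. fall back to the default

    props = {"color": "C1", "edgecolor": MappableStub(depend="color")}
    assert resolve(props, {}, "edgecolor", {}) == "C1"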
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 198, "name": "_infer_orient", "kind": "def", "category": "function", "info": " def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 214, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 223, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 230, "name": "resolve_properties", "kind": "def", "category": "function", "info": "def resolve_properties(\n mark: Mark, data: DataFrame, scales: dict[str, Scale]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 235, "name": "_resolve", "kind": "ref", "category": "function", "info": " name: mark._resolve(data, name, scales) for name in mark._mappable_props\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 240, "name": "resolve_color", "kind": "def", "category": "function", "info": "def resolve_color(\n mark: Mark,\n data: DataFrame | dict,\n prefix: str = \"\",\n scales: dict[str, Scale] | None = None,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 266, "name": "_resolve", "kind": "ref", "category": "function", "info": " color = mark._resolve(data, f\"{prefix}color\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 269, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, f\"{prefix}alpha\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": 
"seaborn/_marks/base.py", "line": 271, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, \"alpha\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 273, "name": "visible", "kind": "def", "category": "function", "info": " def visible(x, axis=None):\n \"\"\"Detect \"invisible\" colors to set alpha appropriately.\"\"\"\n # TODO First clause only needed to handle non-rgba arrays,\n # which we are trying to handle upstream\n return np.array(x).dtype.kind != \"f\" or np.isfinite(x).all(axis)\n\n # Second check here catches vectors of strings with identity scale\n # It could probably be handled better upstream. This is a tricky problem\n if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):\n if len(color) == 4:\n return mpl.colors.to_rgba(color)\n alpha = alpha if visible(color) else np.nan\n return mpl.colors.to_rgba(color, alpha)\n else:\n if np.ndim(color) == 2 and color.shape[1] == 4:\n return mpl.colors.to_rgba_array(color)\n alpha = np.where(visible(color, axis=1), alpha, np.nan)\n return mpl.colors.to_rgba_array(color, alpha)\n\n # TODO should we be implementing fill here too?\n # (i.e. set fillalpha to 0 when fill=False)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 283, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 284, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = alpha if visible(color) else np.nan\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 285, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color, alpha)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 288, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 289, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = np.where(visible(color, axis=1), alpha, np.nan)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 290, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color, alpha)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 296, "name": "document_properties", "kind": "def", "category": "function", "info": "def document_properties(mark):\n\n properties = [f.name for f in fields(mark) if isinstance(f.default, Mappable)]\n text = [\n \"\",\n \" This mark defines the following properties:\",\n textwrap.fill(\n \", \".join([f\"|{p}|\" for p in properties]),\n width=78, initial_indent=\" \" * 8, subsequent_indent=\" \" * 8,\n ),\n ]\n\n docstring_lines = mark.__doc__.split(\"\\n\")\n new_docstring = \"\\n\".join([\n *docstring_lines[:2],\n *text,\n 
*docstring_lines[2:],\n ])\n mark.__doc__ = new_docstring\n return mark\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 26, "name": "DotBase", "kind": "def", "category": "class", "info": "_resolve_paths\t_resolve_properties\t_plot\t_legend_artist"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 28, "name": "_resolve_paths", "kind": "def", "category": "function", "info": " def _resolve_paths(self, data):\n\n paths = []\n path_cache = {}\n marker = data[\"marker\"]\n\n def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 34, "name": "get_transformed_path", "kind": "def", "category": "function", "info": " def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n 
filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 35, "name": "get_path", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 35, "name": "transformed", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 38, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " return get_transformed_path(marker)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 42, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " path_cache[m] = get_transformed_path(m)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 46, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = 
mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 48, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 49, "name": "_resolve_paths", "kind": "ref", "category": "function", "info": " resolved[\"path\"] = self._resolve_paths(resolved)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 53, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = resolved[\"marker\"].is_filled()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 55, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 61, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", 
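The `visible` helper inside `resolve_color` (see the base.py entries above) treats any non-float array as visible and otherwise requires every component to be finite; an invisible color gets `alpha = nan`, which matplotlib then skips. A small demonstration with plain matplotlib calls:

    import numpy as np
    import matplotlib as mpl

    def visible(x, axis=None):
        # Mirrors the helper above: non-float arrays (e.g. named colors) count as
        # visible; float arrays must have all components finite.
        return np.array(x).dtype.kind != "f" or np.isfinite(x).all(axis)

    color = (0.2, 0.4, 0.6)
    alpha = 0.5 if visible(color) else np.nan
    print(mpl.colors.to_rgba(color, alpha))   # (0.2, 0.4, 0.6, 0.5)
    print(visible((np.nan, 0.0, 0.0)))        # False -> alpha becomes nan, so mpl skips it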
"line": 67, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 70, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " data = self._resolve_properties(data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 72, "name": "PathCollection", "kind": "ref", "category": "function", "info": " points = mpl.collections.PathCollection(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 81, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 84, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(points)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 86, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 91, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " res = self._resolve_properties(key, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 93, "name": "PathCollection", "kind": "ref", "category": "function", "info": " return mpl.collections.PathCollection(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 100, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 107, "name": "Dot", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 120, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(\"o\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 121, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(6, grouping=False) # TODO rcParam?\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 122, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 123, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 124, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 125, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 126, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 127, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(depend=\"alpha\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 128, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(.5, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 129, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 131, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n 
transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 133, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 140, "name": "resolve_color", "kind": "ref", "category": "function", "info": " main_color = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 141, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edge_color = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 161, "name": "Dots", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 175, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"scatter.marker\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 176, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(4, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 177, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 178, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 179, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False) # TODO auto alpha?\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 180, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 181, "name": "Mappable", "kind": 
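`Dot._plot` draws all points as a single `PathCollection` whose offsets live in data space while the marker paths stay in point units under an identity transform. A sketch of that pattern, assuming a recent matplotlib where `set_offset_transform` replaces the older `transOffset` constructor argument used above:

    import numpy as np
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    marker = mpl.markers.MarkerStyle("o")
    path = marker.get_path().transformed(marker.get_transform())

    points = mpl.collections.PathCollection(
        paths=[path],
        sizes=[6 ** 2],                                # pointsize squared, as in Dot
        offsets=np.array([[0.2, 0.4], [0.6, 0.8]]),    # positions in data coordinates
        transform=mpl.transforms.IdentityTransform(),  # paths are already in point units
    )
    points.set_offset_transform(ax.transData)          # offsets map through data coords
    ax.add_collection(points)
    ax.autoscale_view()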
"ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 182, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillalpha: MappableFloat = Mappable(.2, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 184, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 186, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 188, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 189, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 36, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", 
"rel_fname": "seaborn/_marks/line.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 38, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 39, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 40, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"lines.marker\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 41, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(rc=\"lines.markersize\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 42, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 43, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 44, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"lines.markeredgewidth\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 48, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = 
resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 50, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 52, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 53, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 54, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 55, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 58, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 58, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 62, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient, kind=\"mergesort\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 65, "name": "_handle_capstyle", "kind": "ref", "category": "function", "info": " 
self._handle_capstyle(artist_kws, vals)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 67, "name": "Line2D", "kind": "ref", "category": "function", "info": " line = mpl.lines.Line2D(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 68, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"x\"].to_numpy(),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 69, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"y\"].to_numpy(),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 80, "name": "add_line", "kind": "ref", "category": "function", "info": " ax.add_line(line)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 82, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 85, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 86, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 87, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 88, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] 
= resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 91, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 91, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 95, "name": "_handle_capstyle", "kind": "ref", "category": "function", "info": " self._handle_capstyle(artist_kws, vals)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 97, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 110, "name": "_handle_capstyle", "kind": "def", "category": "function", "info": " def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 121, "name": "Line", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 140, "name": "Paths", "kind": "def", "category": "class", "info": "__post_init__\t_plot\t_legend_artist\t_setup_segments"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 153, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 154, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 155, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 156, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 160, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n # LineCollection artists have a capstyle property but don't source its value\n # from the rc, so we do that manually here. 
Unfortunately, because we add\n        # only one LineCollection, we have to use the same capstyle for all lines\n        # even when they are dashed. It's a slight inconsistency, but looks fine IMO.\n        self.artist_kws.setdefault(\"capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n\n    def _plot(self, split_gen, scales, orient):\n\n        line_data = {}\n        for keys, data, ax in split_gen(keep_na=not self._sort):\n\n            if ax not in line_data:\n                line_data[ax] = {\n                    \"segments\": [],\n                    \"colors\": [],\n                    \"linewidths\": [],\n                    \"linestyles\": [],\n                }\n\n            segments = self._setup_segments(data, orient)\n            line_data[ax][\"segments\"].extend(segments)\n            n = len(segments)\n\n            vals = resolve_properties(self, keys, scales)\n            vals[\"color\"] = resolve_color(self, keys, scales=scales)\n\n            line_data[ax][\"colors\"].extend([vals[\"color\"]] * n)\n            line_data[ax][\"linewidths\"].extend([vals[\"linewidth\"]] * n)\n            line_data[ax][\"linestyles\"].extend([vals[\"linestyle\"]] * n)\n\n        for ax, ax_data in line_data.items():\n            lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n            # Handle datalim update manually\n            # https://github.com/matplotlib/matplotlib/issues/23129\n            ax.add_collection(lines, autolim=False)\n            if ax_data[\"segments\"]:\n                xy = np.concatenate(ax_data[\"segments\"])\n                ax.update_datalim(xy)\n\n    def _legend_artist(self, variables, value, scales):\n\n        key = resolve_properties(self, {v: value for v in variables}, scales)\n\n        artist_kws = self.artist_kws.copy()\n        capstyle = artist_kws.pop(\"capstyle\")\n        artist_kws[\"solid_capstyle\"] = capstyle\n        artist_kws[\"dash_capstyle\"] = capstyle\n\n        return mpl.lines.Line2D(\n            [], [],\n            color=key[\"color\"],\n            linewidth=key[\"linewidth\"],\n            linestyle=key[\"linestyle\"],\n            **artist_kws,\n        )\n\n    def _setup_segments(self, data, orient):\n\n        if self._sort:\n            data = data.sort_values(orient, kind=\"mergesort\")\n\n        # Column stack to avoid block consolidation\n        xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n        return [xy]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 168, "name": "_plot", "kind": "def", "category": "function", "info": "    def _plot(self, split_gen, scales, orient):\n\n        for keys, data, ax in split_gen(keep_na=not self._sort):\n\n            vals = resolve_properties(self, keys, scales)\n            vals[\"color\"] = resolve_color(self, keys, scales=scales)\n            vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n            vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n            # https://github.com/matplotlib/matplotlib/pull/16692\n            if Version(mpl.__version__) < Version(\"3.3.0\"):\n                vals[\"marker\"] = vals[\"marker\"]._marker\n\n            if self._sort:\n                data = data.sort_values(orient, kind=\"mergesort\")\n\n            artist_kws = self.artist_kws.copy()\n            self._handle_capstyle(artist_kws, vals)\n\n            line = mpl.lines.Line2D(\n                data[\"x\"].to_numpy(),\n                data[\"y\"].to_numpy(),\n                color=vals[\"color\"],\n                linewidth=vals[\"linewidth\"],\n                linestyle=vals[\"linestyle\"],\n                marker=vals[\"marker\"],\n                markersize=vals[\"pointsize\"],\n                markerfacecolor=vals[\"fillcolor\"],\n                markeredgecolor=vals[\"edgecolor\"],\n                markeredgewidth=vals[\"edgewidth\"],\n                **artist_kws,\n            )\n            ax.add_line(line)\n\n    def _legend_artist(self, variables, value, scales):\n\n        keys = {v: value for v in variables}\n        vals = resolve_properties(self, keys, scales)\n        vals[\"color\"] = resolve_color(self, keys, scales=scales)\n        vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", 
scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 171, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 181, "name": "_setup_segments", "kind": "ref", "category": "function", "info": " segments = self._setup_segments(data, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 185, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 186, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 193, "name": "LineCollection", "kind": "ref", "category": "function", "info": " lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 196, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines, autolim=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 199, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xy)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 201, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n # https://github.com/matplotlib/matplotlib/pull/16692\n if Version(mpl.__version__) < 
Version(\"3.3.0\"):\n vals[\"marker\"] = vals[\"marker\"]._marker\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 203, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " key = resolve_properties(self, {v: value for v in variables}, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 210, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 218, "name": "_setup_segments", "kind": "def", "category": "function", "info": " def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 221, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient, kind=\"mergesort\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 231, "name": "Lines", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 249, "name": "Range", "kind": "def", "category": "class", "info": "_setup_segments"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 258, "name": "_setup_segments", "kind": "def", "category": "function", "info": " def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 265, "name": "groupby", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 265, "name": "agg", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 265, "name": "reset_index", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 268, "name": "melt", "kind": "ref", "category": "function", "info": " data = data[cols].melt(orient, value_name=val)[[\"x\", \"y\"]]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 269, "name": "to_numpy", "kind": "ref", "category": "function", "info": " segments = [d.to_numpy() for _, d in data.groupby(orient)]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 269, "name": "groupby", "kind": "ref", "category": "function", "info": " segments = [d.to_numpy() for _, d in data.groupby(orient)]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 275, "name": "Dash", "kind": "def", "category": "class", "info": "_setup_segments"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 284, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(.8, grouping=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 286, "name": "_setup_segments", "kind": "def", "category": "function", "info": " def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 289, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[\"x\", \"y\"]].to_numpy().astype(float)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 289, "name": "astype", "kind": "ref", "category": "function", "info": " xys = data[[\"x\", \"y\"]].to_numpy().astype(float)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 22, "name": "Text", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 31, "name": "Mappable", "kind": "ref", "category": "function", "info": " text: MappableString = Mappable(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 32, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"k\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 33, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = 
Mappable(1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 34, "name": "Mappable", "kind": "ref", "category": "function", "info": " fontsize: MappableFloat = Mappable(rc=\"font.size\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 35, "name": "Mappable", "kind": "ref", "category": "function", "info": " halign: MappableString = Mappable(\"center\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 36, "name": "Mappable", "kind": "ref", "category": "function", "info": " valign: MappableString = Mappable(\"center_baseline\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " offset: MappableFloat = Mappable(4)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 39, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n ax_data = defaultdict(list)\n\n for keys, data, ax in split_gen():\n\n vals = resolve_properties(self, keys, scales)\n color = resolve_color(self, keys, \"\", scales)\n\n halign = vals[\"halign\"]\n valign = vals[\"valign\"]\n fontsize = vals[\"fontsize\"]\n offset = vals[\"offset\"] / 72\n\n offset_trans = ScaledTranslation(\n {\"right\": -offset, \"left\": +offset}.get(halign, 0),\n {\"top\": -offset, \"bottom\": +offset, \"baseline\": +offset}.get(valign, 0),\n ax.figure.dpi_scale_trans,\n )\n\n for row in data.to_dict(\"records\"):\n artist = mpl.text.Text(\n x=row[\"x\"],\n y=row[\"y\"],\n text=str(row.get(\"text\", vals[\"text\"])),\n color=color,\n fontsize=fontsize,\n horizontalalignment=halign,\n verticalalignment=valign,\n transform=ax.transData + offset_trans,\n **self.artist_kws,\n )\n ax.add_artist(artist)\n ax_data[ax].append([row[\"x\"], row[\"y\"]])\n\n for ax, ax_vals in ax_data.items():\n ax.update_datalim(np.array(ax_vals))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 43, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 45, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 46, "name": "resolve_color", "kind": "ref", "category": "function", "info": " color = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 59, "name": "to_dict", "kind": "ref", "category": "function", "info": " for row in data.to_dict(\"records\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 60, "name": "Text", "kind": "ref", "category": "function", "info": " 
artist = mpl.text.Text(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 71, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(artist)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 75, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(np.array(ax_vals))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 28, "name": "SemanticMapping", "kind": "def", "category": "class", "info": "__init__\tmap\t_check_list_length\t_lookup_single\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 54, "name": "cls", "kind": "ref", "category": "function", "info": " setattr(plotter, method_name, cls(plotter, *args, **kwargs))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 57, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels, values, variable):\n \"\"\"Input check when values are provided as a list.\"\"\"\n # Copied from _core/properties; eventually will be replaced for that.\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n if message:\n warnings.warn(message, UserWarning, stacklevel=6)\n\n return values\n\n def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 81, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 88, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return [self._lookup_single(k, *args, **kwargs) for k in key]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 90, "name": 
"_lookup_single", "kind": "ref", "category": "function", "info": " return self._lookup_single(key, *args, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 94, "name": "HueMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\tinfer_map_type\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 125, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 138, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, cmap = self.numeric_mapping(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 147, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 156, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 169, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 185, "name": "norm", "kind": "ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 192, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 194, "name": "cmap", "kind": "ref", "category": "function", "info": " value = self.cmap(normed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 197, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, palette, norm, input_format, var_type):\n \"\"\"Determine how to implement the mapping.\"\"\"\n if palette in QUAL_PALETTES:\n map_type = \"categorical\"\n elif norm is not None:\n map_type = \"numeric\"\n elif isinstance(palette, (dict, list)):\n map_type = \"categorical\"\n elif input_format == \"wide\":\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is 
categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n colors = self._check_list_length(levels, palette, \"palette\")\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 212, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n colors = self._check_list_length(levels, palette, \"palette\")\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a 
numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 216, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 233, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n_colors <= len(get_color_cycle()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 234, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(None, n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 236, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 238, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, palette, \"palette\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 240, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 246, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to 
use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 254, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 260, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 271, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(palette, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 275, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 277, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 282, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 283, "name": "norm", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 283, "name": "dropna", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 285, "name": "cmap", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 285, "name": "norm", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 291, "name": "SizeMapping", "kind": "def", "category": "class", "info": 
"__init__\tinfer_map_type\t_lookup_single\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 312, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 320, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, size_range = self.numeric_mapping(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 328, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 338, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 352, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, norm, sizes, var_type):\n\n if norm is not None:\n map_type = \"numeric\"\n elif isinstance(sizes, (dict, list)):\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def _lookup_single(self, key):\n\n try:\n value = self.lookup_table[key]\n except KeyError:\n normed = self.norm(key)\n if np.ma.is_masked(normed):\n normed = np.nan\n value = self.size_range[0] + normed * np.ptp(self.size_range)\n return value\n\n def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n sizes = self._check_list_length(levels, sizes, \"sizes\")\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. 
This is because \"ordered\" categories\n # are often thought to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process the\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinearly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 363, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 368, "name": "norm", "kind": 
"ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 369, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 374, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n sizes = self._check_list_length(levels, sizes, \"sizes\")\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. This is because \"ordered\" categories\n # are often though to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process it\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. 
See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinearly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 376, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 390, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " sizes = self._check_list_length(levels, sizes, \"sizes\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 432, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process the\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. 
See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinearly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 445, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 480, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 483, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 495, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 496, "name": "norm", "kind": "ref", "category": "function", "info": " norm(levels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 499, "name": "norm", "kind": "ref", "category": "function", "info": " sizes_scaled = norm(levels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 513, "name": "StyleMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\t_map_attributes"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 536, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data) == \"datetime\":\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 540, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 542, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " markers = self._map_attributes(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 543, "name": "unique_markers", "kind": "ref", "category": "function", "info": " markers, levels, unique_markers(len(levels)), \"markers\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 545, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " dashes = self._map_attributes(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 546, "name": "unique_dashes", "kind": "ref", "category": "function", "info": " dashes, levels, unique_dashes(len(levels)), \"dashes\",\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 554, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 555, "name": "get_path", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 555, "name": "transformed", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 555, "name": "get_transform", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 556, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_markers.append(m.is_filled())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 579, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key, attr=None):\n \"\"\"Get attribute(s) for a given data point.\"\"\"\n if attr is None:\n value = self.lookup_table[key]\n else:\n value = self.lookup_table[key][attr]\n return value\n\n def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n arg = self._check_list_length(levels, arg, attr)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err 
= f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 587, "name": "_map_attributes", "kind": "def", "category": "function", "info": " def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n arg = self._check_list_length(levels, arg, attr)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err = f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 598, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " arg = self._check_list_length(levels, arg, attr)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 612, "name": "VectorPlotter", "kind": "def", "category": "class", "info": "__init__\tget_semantics\thas_xy_data\tvar_levels\tassign_variables\t_assign_variables_wideform\t_assign_variables_longform\titer_data\tcomp_data\t_get_axes\t_attach\t_log_scaled\t_add_axis_labels\tscale_native\tscale_numeric\tscale_datetime\tscale_categorical"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 639, "name": "assign_variables", "kind": "ref", "category": "function", "info": " self.assign_variables(data, variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 651, "name": "get_semantics", "kind": "def", "category": "function", "info": " def get_semantics(cls, kwargs, semantics=None):\n \"\"\"Subset a dictionary arguments with known semantic variables.\"\"\"\n # TODO this should be get_variables since we have included x and y\n if semantics is None:\n semantics = cls.semantics\n variables = {}\n for key, val in kwargs.items():\n if key in semantics and val is not None:\n variables[key] = val\n return variables\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variables levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't be matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n                    # add (before 0.12), which we may decide to break, in which\n                    # case this option could be removed\n                    data_subset = data.loc[[]]\n\n                if data_subset.empty and not allow_empty:\n                    continue\n\n                sub_vars = dict(zip(grouping_vars, key))\n\n                yield sub_vars, data_subset.copy()\n\n        else:\n\n            yield {}, data.copy()\n\n    @property\n    def comp_data(self):\n        \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n        if not hasattr(self, \"ax\"):\n            # Probably a good idea, but will need a bunch of tests updated\n            # Most of these tests should just use the external interface\n            # Then this can be re-enabled.\n            # raise AttributeError(\"No Axes attached to plotter\")\n            return self.plot_data\n\n        if not hasattr(self, \"_comp_data\"):\n\n            comp_data = (\n                self.plot_data\n                .copy(deep=False)\n                .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n            )\n\n            for var in \"yx\":\n                if var not in self.variables:\n                    continue\n\n                parts = []\n                grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n                for converter, orig in grouped:\n                    with pd.option_context('mode.use_inf_as_na', True):\n                        orig = orig.dropna()\n                        if var in self.var_levels:\n                            # TODO this should happen in some centralized location\n                            # it is similar to GH2419, but more complicated because\n                            # supporting `order` in categorical plots is tricky\n                            orig = orig[orig.isin(self.var_levels[var])]\n                    comp = pd.to_numeric(converter.convert_units(orig))\n                    if converter.get_scale() == \"log\":\n                        comp = np.log10(comp)\n                    parts.append(pd.Series(comp, orig.index, name=orig.name))\n                if parts:\n                    comp_col = pd.concat(parts)\n                else:\n                    comp_col = pd.Series(dtype=float, name=var)\n                comp_data.insert(0, var, comp_col)\n\n            self._comp_data = comp_data\n\n        return self._comp_data\n\n    def _get_axes(self, sub_vars):\n        \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n        row = sub_vars.get(\"row\", None)\n        col = sub_vars.get(\"col\", None)\n        if row is not None and col is not None:\n            return self.facets.axes_dict[(row, col)]\n        elif row is not None:\n            return self.facets.axes_dict[row]\n        elif col is not None:\n            return self.facets.axes_dict[col]\n        elif self.ax is None:\n            return self.facets.ax\n        else:\n            return self.ax\n\n    def _attach(\n        self,\n        obj,\n        allowed_types=None,\n        log_scale=None,\n    ):\n        \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n        Parameters\n        ----------\n        obj : :class:`matplotlib.axes.Axes` or :class:`FacetGrid`\n            Structural object that we will eventually plot onto.\n        allowed_types : str or list of str\n            If provided, raise when either the x or y variable does not have\n            one of the declared seaborn types.\n        log_scale : bool, number, or pair of bools or numbers\n            If not False, set the axes to use log scaling, with the given\n            base or defaulting to 10. 
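For example, ``log_scale=2`` applies base-2 log scaling to each defined axis. 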
If a tuple, interpreted as separate\n            arguments for the x and y axes.\n\n        \"\"\"\n        from .axisgrid import FacetGrid\n        if isinstance(obj, FacetGrid):\n            self.ax = None\n            self.facets = obj\n            ax_list = obj.axes.flatten()\n            if obj.col_names is not None:\n                self.var_levels[\"col\"] = obj.col_names\n            if obj.row_names is not None:\n                self.var_levels[\"row\"] = obj.row_names\n        else:\n            self.ax = obj\n            self.facets = None\n            ax_list = [obj]\n\n        # Identify which \"axis\" variables we have defined\n        axis_variables = set(\"xy\").intersection(self.variables)\n\n        # -- Verify the types of our x and y variables here.\n        # This doesn't really make complete sense being here, but it's a fine\n        # place for it, given the current system.\n        # (Note that for some plots, there might be more complicated restrictions)\n        # e.g. the categorical plots have their own check that is specific to the\n        # non-categorical axis.\n        if allowed_types is None:\n            allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n        elif isinstance(allowed_types, str):\n            allowed_types = [allowed_types]\n\n        for var in axis_variables:\n            var_type = self.var_types[var]\n            if var_type not in allowed_types:\n                err = (\n                    f\"The {var} variable is {var_type}, but one of \"\n                    f\"{allowed_types} is required\"\n                )\n                raise TypeError(err)\n\n        # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n        facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n        self.converters = {}\n        for var in axis_variables:\n            other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n            converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n            share_state = getattr(self.facets, f\"_share{var}\", True)\n\n            # Simplest cases are that we have a single axes, all axes are shared,\n            # or sharing is only on the orthogonal facet dimension. In these cases,\n            # all datapoints get converted the same way, so use the first axis\n            if share_state is True or share_state == facet_dim[other_var]:\n                converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n            else:\n\n                # Next simplest case is when no axes are shared, and we can\n                # use the axis objects within each facet\n                if share_state is False:\n                    for axes_vars, axes_data in self.iter_data():\n                        ax = self._get_axes(axes_vars)\n                        converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n                # In the more complicated case, the axes are shared within each\n                # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 688, "name": "assign_variables", "kind": "def", "category": "function", "info": " def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following 
variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n 
plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
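The `_assign_variables_longform` logic indexed above resolves each plot variable either as a key into `data` (a column or index level) or as a vector passed directly, inferring a name from the object's `name` attribute when one exists. A minimal standalone sketch of that dispatch; `assign_longform` and the surrounding scaffolding are illustrative names, not seaborn's public API:

import pandas as pd

def assign_longform(data=None, **kwargs):
    data = {} if data is None else data
    plot_data, variables = {}, {}
    for key, val in kwargs.items():
        try:
            val_is_key = val in data
        except TypeError:  # unhashable values cannot be column keys
            val_is_key = False
        if val_is_key:
            plot_data[key] = data[val]  # lookup by name in `data`
            variables[key] = val
        elif isinstance(val, (str, bytes)):
            # looks like a column name, but it isn't one
            raise ValueError(f"Could not interpret value `{val}` for parameter `{key}`")
        else:
            plot_data[key] = val  # the value is itself the data vector
            variables[key] = getattr(val, "name", None)
    frame = pd.DataFrame(plot_data)  # aligns pandas objects on their index
    variables = {k: v for k, v in variables.items() if frame[k].notnull().any()}
    return frame, variables

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
frame, variables = assign_longform(df, x="a", y=df["b"] * 2)
# variables == {"x": "a", "y": "b"}; frame holds tidy "x" and "y" columns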
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 695, "name": "_assign_variables_wideform", "kind": "ref", "category": "function", "info": " plot_data, variables = self._assign_variables_wideform(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 700, "name": "_assign_variables_longform", "kind": "ref", "category": "function", "info": " plot_data, variables = self._assign_variables_longform(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 707, "name": "variable_type", "kind": "ref", "category": "function", "info": " v: variable_type(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 716, "name": "_assign_variables_wideform", "kind": "def", "category": "function", "info": " def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
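The pre-sort mentioned in the comment above is easiest to see with a pandas-only sketch: for string data, level order is taken from order of appearance (as `categorical_order` does via `unique()`), so a stable numeric sort before string coercion preserves numeric order in the resulting levels.

```python
# Why the numeric data is sorted (stable mergesort) before string conversion:
# appearance order of the raw data would otherwise leak into the level order.
import pandas as pd

s = pd.Series([10, 2, 1, 2])
raw_levels = list(s.astype(str).unique())
# ['10', '2', '1'] -- appearance order of the unsorted data
sorted_levels = list(s.sort_values(kind="mergesort").astype(str).unique())
# ['1', '2', '10'] -- numeric order survives the coercion to strings
print(raw_levels, sorted_levels)
```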
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 817, "name": "variable_type", "kind": "ref", "category": "function", "info": " k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 829, "name": "add_categories", "kind": "ref", "category": "function", "info": " wide_data.columns = wide_data.columns.add_categories(\"@index\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 834, "name": "to_series", "kind": "ref", "category": "function", "info": " wide_data[\"@index\"] = wide_data.index.to_series()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 858, "name": "_assign_variables_longform", "kind": "def", "category": "function", "info": " def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
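The "name or vector" resolution rule in `_assign_variables_longform` can be summarized with a short sketch. `resolve_variable` below is a hypothetical stand-in for the inline logic; the real code also accepts index-level names, non-string keys into `data`, and enforces the length-match check shown in the error message above.

```python
# Simplified sketch of long-form variable resolution: a string must name a
# column of `data`; anything else is treated as the data vector itself.
import pandas as pd

def resolve_variable(data, val):
    if val is None:
        return None, None
    if isinstance(val, (str, bytes)):
        if val in data:
            return data[val], val  # column name -> column, name kept
        raise ValueError(f"Could not interpret value `{val}`")
    return val, getattr(val, "name", None)  # vector -> itself, name inferred

df = pd.DataFrame({"total_bill": [10.0, 20.0]})
vec, name = resolve_variable(df, "total_bill")            # lookup by name
vec2, name2 = resolve_variable(df, df["total_bill"] * 2)  # pass a vector
print(name, name2)  # total_bill total_bill
```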
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 902, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = data.index.to_frame()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 972, "name": "iter_data", "kind": "def", "category": "function", "info": " def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) 
semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = 
pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1032, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1044, "name": "convert_units", "kind": "ref", "category": "function", "info": " levels[axis] = converter.convert_units(levels[axis])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1050, "name": "date2num", "kind": "ref", "category": "function", "info": " levels[axis] = mpl.dates.date2num(levels[axis])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1051, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1074, "name": "get_group", "kind": "ref", "category": "function", "info": " data_subset = grouped_data.get_group(pd_key)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1094, "name": "comp_data", "kind": "def", "category": "function", "info": " def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig))\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = 
pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1108, "name": "drop", "kind": "ref", "category": "function", "info": " .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1119, "name": "dropna", "kind": "ref", "category": "function", "info": " orig = orig.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1125, "name": "convert_units", "kind": "ref", "category": "function", "info": " comp = pd.to_numeric(converter.convert_units(orig))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1126, "name": "get_scale", "kind": "ref", "category": "function", "info": " if converter.get_scale() == \"log\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1139, "name": "_get_axes", "kind": "def", "category": "function", "info": " def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1154, "name": "_attach", "kind": "def", "category": "function", "info": " def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. 
the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n if Version(mpl.__version__) >= Version(\"3.3\"):\n set_scale(\"log\", base=scale)\n else:\n set_scale(\"log\", **{f\"base{axis}\": scale})\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis 
is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? 
Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1179, "name": "flatten", "kind": "ref", "category": "function", "info": " ax_list = obj.axes.flatten()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1234, "name": "iter_data", "kind": "ref", "category": "function", "info": " for axes_vars, axes_data in self.iter_data():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1235, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(axes_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1260, "name": "categorical_order", "kind": "ref", "category": "function", "info": " seed_data = categorical_order(seed_data, order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1261, "name": "update_units", "kind": "ref", "category": "function", "info": " converter.update_units(seed_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1282, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1284, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.3\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1284, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) >= Version(\"3.3\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1285, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", base=scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1287, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", **{f\"base{axis}\": scale})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1293, "name": "set_inverted", "kind": "ref", "category": "function", "info": " ax.yaxis.set_inverted(True)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1295, "name": "yaxis_inverted", "kind": "ref", "category": "function", "info": " if not ax.yaxis_inverted():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1296, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1300, "name": "_log_scaled", "kind": "def", "category": "function", "info": " def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. 
whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1306, "name": "flatten", "kind": "ref", "category": "function", "info": " axes_list = self.facets.axes.flatten()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1313, "name": "get_scale", "kind": "ref", "category": "function", "info": " log_scaled.append(data_axis.get_scale() == \"log\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1320, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? 
Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1326, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " if not ax.get_xlabel():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1327, "name": "get_visible", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1327, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1328, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1329, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " if not ax.get_ylabel():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1330, "name": "get_visible", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1330, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"}, 
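The records above repeatedly quote `scale_categorical` and `_attach`, whose core trick is that matplotlib treats string data as categorical: converting both the data and the desired level order to strings, then seeding each axis with the ordered levels via `Axis.update_units`, pins every level to a fixed position before any artist is drawn. A minimal standalone sketch of that technique follows; this is not seaborn's actual API, and the variable names (`values`, `order`, `cat_data`) are illustrative only.

import matplotlib.pyplot as plt
import pandas as pd

values = pd.Series([3, 1, 2, 1, 3])

# Sort numerically before coercing to str, so that e.g. "10" does not land
# before "2"; this mirrors the sort_values(..., kind="mergesort") step in
# the quoted scale_categorical source.
order = [str(v) for v in sorted(values.unique())]
cat_data = values.astype(str)

fig, ax = plt.subplots()
# update_units() registers the string -> position mapping on the axis (the
# same call the quoted _attach method makes via converter.update_units), so
# the levels keep fixed slots even when a later subset of the data is sparse.
ax.xaxis.update_units(order)
ax.scatter(cat_data, values)

Seeding the units up front, rather than letting each plotting call extend the category mapping as new strings appear, is what keeps level positions consistent across facets that share an axis.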
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1331, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1338, "name": "scale_native", "kind": "def", "category": "function", "info": " def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1344, "name": "scale_numeric", "kind": "def", "category": "function", "info": " def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. 
https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1351, "name": "scale_datetime", "kind": "def", "category": "function", "info": " def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1358, "name": "scale_categorical", "kind": "def", "category": "function", "info": " def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector\n cat_data = self.plot_data[axis]\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order))\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1400, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"axis\", [\"x\", \"y\"], axis)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1417, "name": "sort_values", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1427, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = pd.Index(categorical_order(cat_data, order))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1437, "name": "astype", "kind": "ref", "category": "function", "info": " cat_data = cat_data.astype(str)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1438, "name": "astype", "kind": "ref", "category": "function", "info": " order = order.astype(str)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1452, "name": "VariableType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1472, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(vector, boolean_type=\"numeric\"):\n \"\"\"\n Determine whether a vector contains numeric, categorical, or datetime data.\n\n This function differs from the pandas typing API in two ways:\n\n - Python sequences or object-typed PyData objects are considered numeric if\n all of their entries are numeric.\n - String or mixed-type data are considered categorical even if not\n explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.\n\n Parameters\n ----------\n vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence\n Input data to test.\n boolean_type : 'numeric' or 'categorical'\n Type to use for vectors containing only 0s and 1s (and NAs).\n\n Returns\n -------\n var_type : 'numeric', 'categorical', or 'datetime'\n Name identifying the type of data in the vector.\n \"\"\"\n\n # If a categorical dtype is set, infer categorical\n if pd.api.types.is_categorical_dtype(vector):\n return VariableType(\"categorical\")\n\n # Special-case all-na data, which is always \"numeric\"\n if 
pd.isna(vector).all():\n return VariableType(\"numeric\")\n\n # Special-case binary/boolean data, allow caller to determine\n # This triggers a numpy warning when vector has strings/objects\n # https://github.com/numpy/numpy/issues/6784\n # Because we reduce with .all(), we are agnostic about whether the\n # comparison returns a scalar or vector, so we will ignore the warning.\n # It triggers a separate DeprecationWarning when the vector has datetimes:\n # https://github.com/numpy/numpy/issues/13548\n # This is considered a bug by numpy and will likely go away.\n with warnings.catch_warnings():\n warnings.simplefilter(\n action='ignore', category=(FutureWarning, DeprecationWarning)\n )\n if np.isin(vector, [0, 1, np.nan]).all():\n return VariableType(boolean_type)\n\n # Defer to positive pandas tests\n if pd.api.types.is_numeric_dtype(vector):\n return VariableType(\"numeric\")\n\n if pd.api.types.is_datetime64_dtype(vector):\n return VariableType(\"datetime\")\n\n # --- If we get to here, we need to check the entries\n\n # Check for a collection where everything is a number\n\n def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1497, "name": "is_categorical_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_categorical_dtype(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1498, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1502, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1517, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(boolean_type)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1520, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1521, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1523, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": 
"seaborn/_oldcore.py", "line": 1524, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1530, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1536, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1537, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1541, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1547, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1548, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1552, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1555, "name": "infer_orient", "kind": "def", "category": "function", "info": "def infer_orient(x=None, y=None, orient=None, require_numeric=True):\n \"\"\"Determine how the plot should be oriented based on the data.\n\n For historical reasons, the convention is to call a plot \"horizontally\"\n or \"vertically\" oriented based on the axis representing its dependent\n variable. 
Practically, this is used when determining the axis for\n numerical aggregation.\n\n Parameters\n ----------\n x, y : Vector data or None\n Positional data vectors for the plot.\n orient : string or None\n Specified orientation, which must start with \"v\" or \"h\" if not None.\n require_numeric : bool\n If set, raise when the implied dependent variable is not numeric.\n\n Returns\n -------\n orient : \"v\" or \"h\"\n\n Raises\n ------\n ValueError: When `orient` is not None and does not start with \"h\" or \"v\"\n TypeError: When dependent variable is not numeric, with `require_numeric`\n\n \"\"\"\n\n x_type = None if x is None else variable_type(x)\n y_type = None if y is None else variable_type(y)\n\n nonnumeric_dv_error = \"{} orientation requires numeric `{}` variable.\"\n single_var_warning = \"{} orientation ignored with only `{}` specified.\"\n\n if x is None:\n if str(orient).startswith(\"h\"):\n warnings.warn(single_var_warning.format(\"Horizontal\", \"y\"))\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"v\"\n\n elif y is None:\n if str(orient).startswith(\"v\"):\n warnings.warn(single_var_warning.format(\"Vertical\", \"x\"))\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"h\"\n\n elif str(orient).startswith(\"v\"):\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"v\"\n\n elif str(orient).startswith(\"h\"):\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"h\"\n\n elif orient is not None:\n err = (\n \"`orient` must start with 'v' or 'h' or be None, \"\n f\"but `{repr(orient)}` was passed.\"\n )\n raise ValueError(err)\n\n elif x_type != \"categorical\" and y_type == \"categorical\":\n return \"h\"\n\n elif x_type != \"numeric\" and y_type == \"numeric\":\n return \"v\"\n\n elif x_type == \"numeric\" and y_type != \"numeric\":\n return \"h\"\n\n elif require_numeric and \"numeric\" not in (x_type, y_type):\n err = \"Neither the `x` nor `y` variable appears to be numeric.\"\n raise TypeError(err)\n\n else:\n return \"v\"\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1583, "name": "variable_type", "kind": "ref", "category": "function", "info": " x_type = None if x is None else variable_type(x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1584, "name": "variable_type", "kind": "ref", "category": "function", "info": " y_type = None if y is None else variable_type(y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1637, "name": "unique_dashes", "kind": "def", "category": "function", "info": "def unique_dashes(n):\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes = [\n \"\",\n (4, 1.5),\n (1, 1),\n (3, 1.25, 1.5, 1.25),\n (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(\n list(a)[1:-1][::-1],\n list(b)[1:-1]\n ))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return dashes[:n]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1688, "name": "unique_markers", "kind": "def", "category": "function", "info": "def unique_markers(n):\n \"\"\"Build an arbitrarily long list of unique marker styles for points.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\",\n \"X\",\n (4, 0, 45),\n \"P\",\n (4, 0, 0),\n (4, 1, 0),\n \"^\",\n (4, 1, 45),\n \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([\n (s + 1, 1, a),\n (s + 1, 0, a),\n (s, 1, 0),\n (s, 0, 0),\n ])\n s += 1\n\n # Convert to MarkerStyle object, using only exactly what we need\n # markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]\n\n return markers[:n]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1734, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector, order=None):\n \"\"\"Return a list of unique data values.\n\n Determine an ordered list of levels in ``values``.\n\n Parameters\n ----------\n vector : list, array, Categorical, or Series\n Vector of \"categorical\" values\n order : list-like, optional\n Desired order of category levels to override the order determined\n from the ``values`` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is None:\n if hasattr(vector, \"categories\"):\n order = vector.categories\n else:\n try:\n order = vector.cat.categories\n except (TypeError, AttributeError):\n\n try:\n order = vector.unique()\n except AttributeError:\n order = pd.unique(vector)\n\n if variable_type(vector) == \"numeric\":\n order = np.sort(order)\n\n order = filter(pd.notnull, order)\n return list(order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1766, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(vector) == \"numeric\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 40, "name": "KDE", "kind": "def", "category": "class", "info": 
"__init__\t_define_support_grid\t_define_support_univariate\t_define_support_bivariate\tdefine_support\t_fit\t_eval_univariate\t_eval_bivariate\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 88, "name": "_define_support_grid", "kind": "def", "category": "function", "info": " def _define_support_grid(self, x, bw, cut, clip, gridsize):\n \"\"\"Create the grid of evaluation points depending for vector x.\"\"\"\n clip_lo = -np.inf if clip[0] is None else clip[0]\n clip_hi = +np.inf if clip[1] is None else clip[1]\n gridmin = max(x.min() - bw * cut, clip_lo)\n gridmax = min(x.max() + bw * cut, clip_hi)\n return np.linspace(gridmin, gridmax, gridsize)\n\n def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 96, "name": "_define_support_univariate", "kind": "def", "category": "function", "info": " def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 98, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 100, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid = self._define_support_grid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", 
"rel_fname": "seaborn/_statistics.py", "line": 105, "name": "_define_support_bivariate", "kind": "def", "category": "function", "info": " def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 111, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 114, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid1 = self._define_support_grid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 117, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid2 = self._define_support_grid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 123, "name": "define_support", "kind": "def", 
"category": "function", "info": " def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 126, "name": "_define_support_univariate", "kind": "ref", "category": "function", "info": " support = self._define_support_univariate(x1, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 128, "name": "_define_support_bivariate", "kind": "ref", "category": "function", "info": " support = self._define_support_bivariate(x1, x2, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 135, "name": "_fit", "kind": "def", "category": "function", "info": " def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n 
def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 146, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 150, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x, cache=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 152, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 160, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde(support)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 164, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = 
self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 168, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x1, x2, cache=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 170, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 184, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 191, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 193, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 198, "name": "Histogram", "kind": "def", "category": "class", "info": "__init__\t_define_bin_edges\tdefine_bin_params\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 242, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", stat_choices, stat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 253, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n if binrange is None:\n start, stop = x.min(), x.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n # Handle roundoff error (maybe there is a less clumsy way?)\n if bin_edges.max() < stop or len(bin_edges) < 2:\n bin_edges = np.append(bin_edges, bin_edges.max() + step)\n else:\n bin_edges = np.histogram_bin_edges(\n x, bins, binrange, weights,\n )\n return bin_edges\n\n def define_bin_params(self, x1, x2=None, 
weights=None, cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 274, "name": "define_bin_params", "kind": "def", "category": "function", "info": " def define_bin_params(self, x1, x2=None, weights=None, 
cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 278, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 323, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges.append(self._define_bin_edges(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 334, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 338, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x1, x2, cache=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 352, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 354, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 356, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / area\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": 
"seaborn/_statistics.py", "line": 366, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 370, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 378, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 380, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 382, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / np.diff(bin_edges)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 395, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 397, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 400, "name": "ECDF", "kind": "def", "category": "class", "info": "__init__\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 413, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", [\"count\", \"proportion\"], stat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 417, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, 
x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 421, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 448, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 
450, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 453, "name": "EstimateAggregator", "kind": "def", "category": "class", "info": "__init__\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 473, "name": "_validate_errorbar_arg", "kind": "ref", "category": "function", "info": " method, level = _validate_errorbar_arg(errorbar)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 485, "name": "estimator", "kind": "ref", "category": "function", "info": " estimate = self.estimator(vals)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 487, "name": "agg", "kind": "ref", "category": "function", "info": " estimate = vals.agg(self.estimator)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 497, "name": "error_method", "kind": "ref", "category": "function", "info": " err_min, err_max = self.error_method(vals)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 504, "name": "sem", "kind": "ref", "category": "function", "info": " half_interval = vals.sem() * self.error_level\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 509, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(vals, self.error_level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 512, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 513, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(boots, self.error_level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 518, "name": "_percentile_interval", "kind": "def", "category": "function", "info": "def _percentile_interval(data, width):\n \"\"\"Return a percentile interval from data of a given width.\"\"\"\n edge = (100 - width) / 2\n percentiles = edge, 100 - edge\n return np.nanpercentile(data, percentiles)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 525, "name": "_validate_errorbar_arg", "kind": "def", "category": "function", "info": "def _validate_errorbar_arg(arg):\n \"\"\"Check type and value of errorbar argument and assign default level.\"\"\"\n DEFAULT_LEVELS = {\n \"ci\": 95,\n \"pi\": 95,\n \"se\": 1,\n \"sd\": 1,\n }\n\n usage = \"`errorbar` must be a callable, string, or (string, number) tuple\"\n\n if arg is None:\n return None, None\n 
elif callable(arg):\n return arg, None\n elif isinstance(arg, str):\n method = arg\n level = DEFAULT_LEVELS.get(method, None)\n else:\n try:\n method, level = arg\n except (ValueError, TypeError) as err:\n raise err.__class__(usage) from err\n\n _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n if level is not None and not isinstance(level, Number):\n raise TypeError(usage)\n\n return method, level\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 549, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 15, "name": "Agg", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 52, "name": "Est", "kind": "def", "category": "class", "info": "_process\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 84, "name": "_process", "kind": "def", "category": "function", "info": " def _process(\n self, data: DataFrame, var: str, estimator: EstimateAggregator\n ) -> DataFrame:\n # Needed because GroupBy.apply assumes func is DataFrame -> DataFrame\n # which we could probably make more general to allow Series return\n res = estimator(data, var)\n return pd.DataFrame([res])\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n boot_kws = {\"n_boot\": self.n_boot, \"seed\": self.seed}\n engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n\n var = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res = (\n groupby\n .apply(data, self._process, var, engine)\n .dropna(subset=[var])\n .reset_index(drop=True)\n )\n\n res = res.fillna({f\"{var}min\": res[var], f\"{var}max\": res[var]})\n\n return res\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 89, "name": "estimator", "kind": "ref", "category": "function", "info": " res = estimator(data, var)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 97, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 113, "name": "Rolling", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 15, "name": "Stat", "kind": "def", "category": "class", "info": "_check_param_one_of\t_check_grouping_vars\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 32, "name": "_check_param_one_of", "kind": "def", "category": "function", "info": " def _check_param_one_of(self, param: str, options: Iterable[Any]) -> 
None:\n \"\"\"Raise when parameter value is not one of a specified set.\"\"\"\n value = getattr(self, param)\n if value not in options:\n *most, last = options\n option_str = \", \".join(f\"{x!r}\" for x in most) + f\" or {last!r}\"\n err = \" \".join([\n f\"The `{param}` parameter for `{self.__class__.__name__}` must be\",\n f\"one of {option_str}; not {value!r}.\",\n ])\n raise ValueError(err)\n\n def _check_grouping_vars(\n self, param: str, data_vars: list[str], stacklevel: int = 2,\n ) -> None:\n \"\"\"Warn if vars are named in parameter without being present in the data.\"\"\"\n param_vars = getattr(self, param)\n undefined = set(param_vars) - set(data_vars)\n if undefined:\n param = f\"{self.__class__.__name__}.{param}\"\n names = \", \".join(f\"{x!r}\" for x in undefined)\n msg = f\"Undefined variable(s) passed for {param}: {names}.\"\n warnings.warn(msg, stacklevel=stacklevel)\n\n def __call__(\n self,\n data: DataFrame,\n groupby: GroupBy,\n orient: str,\n scales: dict[str, Scale],\n ) -> DataFrame:\n \"\"\"Apply statistical transform to data subgroups and return combined result.\"\"\"\n return data\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 44, "name": "_check_grouping_vars", "kind": "def", "category": "function", "info": " def _check_grouping_vars(\n self, param: str, data_vars: list[str], stacklevel: int = 2,\n ) -> None:\n \"\"\"Warn if vars are named in parameter without being present in the data.\"\"\"\n param_vars = getattr(self, param)\n undefined = set(param_vars) - set(data_vars)\n if undefined:\n param = f\"{self.__class__.__name__}.{param}\"\n names = \", \".join(f\"{x!r}\" for x in undefined)\n msg = f\"Undefined variable(s) passed for {param}: {names}.\"\n warnings.warn(msg, stacklevel=stacklevel)\n\n def __call__(\n self,\n data: DataFrame,\n groupby: GroupBy,\n orient: str,\n scales: dict[str, Scale],\n ) -> DataFrame:\n \"\"\"Apply statistical transform to data subgroups and return combined result.\"\"\"\n return data\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 18, "name": "Count", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 49, "name": "Hist", "kind": "def", "category": "class", "info": "__post_init__\t_define_bin_edges\t_define_bin_params\t_get_bins_and_eval\t_eval\t_normalize\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 114, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n stat_options = [\n \"count\", \"density\", \"percent\", \"probability\", \"proportion\", \"frequency\"\n ]\n self._check_param_one_of(\"stat\", stat_options)\n\n def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n vals = vals.dropna()\n\n if binrange is None:\n start, stop = vals.min(), vals.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n else:\n bin_edges = np.histogram_bin_edges(vals, bins, binrange, 
weight)\n\n # TODO warning or cap on too many bins?\n\n return bin_edges\n\n def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weights, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 119, "name": "_check_param_one_of", "kind": "ref", "category": "function", "info": " self._check_param_one_of(\"stat\", stat_options)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 121, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, vals, weight, bins, 
binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n vals = vals.dropna()\n\n if binrange is None:\n start, stop = vals.min(), vals.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n else:\n bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)\n\n # TODO warning or cap on too many bins?\n\n return bin_edges\n\n def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weights, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": 
"seaborn/_stats/counting.py", "line": 142, "name": "_define_bin_params", "kind": "def", "category": "function", "info": " def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weights, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 151, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 164, "name": "_get_bins_and_eval", "kind": "def", "category": "function", "info": " def 
_get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 166, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 169, "name": "_eval", "kind": "def", "category": "function", "info": " def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return 
data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 182, "name": "_normalize", "kind": "def", "category": "function", "info": " def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 207, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 211, "name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = 
GroupBy(grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 213, "name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = GroupBy(self.common_bins)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 214, "name": "_check_grouping_vars", "kind": "ref", "category": "function", "info": " self._check_grouping_vars(\"common_bins\", grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 221, "name": "_normalize", "kind": "ref", "category": "function", "info": " data = self._normalize(data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 224, "name": "GroupBy", "kind": "ref", "category": "function", "info": " norm_groupby = GroupBy(grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 226, "name": "GroupBy", "kind": "ref", "category": "function", "info": " norm_groupby = GroupBy(self.common_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 227, "name": "_check_grouping_vars", "kind": "ref", "category": "function", "info": " self._check_grouping_vars(\"common_norm\", grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 21, "name": "KDE", "kind": "def", "category": "class", "info": "__post_init__\t_check_var_list_or_boolean\t_fit\t_get_support\t_fit_and_evaluate\t_transform\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 93, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n if self.cumulative and _no_scipy:\n raise RuntimeError(\"Cumulative KDE evaluation requires scipy\")\n\n def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:\n \"\"\"Do input checks on grouping parameters.\"\"\"\n value = getattr(self, param)\n if not (\n isinstance(value, bool)\n or (isinstance(value, list) and all(isinstance(v, str) for v in value))\n ):\n param_name = f\"{self.__class__.__name__}.{param}\"\n raise TypeError(f\"{param_name} must be a boolean or list of strings.\")\n self._check_grouping_vars(param, grouping_vars, stacklevel=3)\n\n def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n \"\"\"Fit and return a KDE object.\"\"\"\n # TODO need to handle singular data\n\n fit_kws: dict[str, Any] = {\"bw_method\": self.bw_method}\n if \"weight\" in data:\n fit_kws[\"weights\"] = data[\"weight\"]\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw 
* self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 98, "name": "_check_var_list_or_boolean", "kind": "def", "category": "function", "info": " def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:\n \"\"\"Do input checks on grouping parameters.\"\"\"\n value = getattr(self, param)\n if not (\n isinstance(value, bool)\n or (isinstance(value, list) and all(isinstance(v, str) for v in value))\n ):\n param_name = f\"{self.__class__.__name__}.{param}\"\n raise TypeError(f\"{param_name} must be a boolean or list of strings.\")\n self._check_grouping_vars(param, grouping_vars, stacklevel=3)\n\n def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n \"\"\"Fit and 
return a KDE object.\"\"\"\n # TODO need to handle singular data\n\n fit_kws: dict[str, Any] = {\"bw_method\": self.bw_method}\n if \"weight\" in data:\n fit_kws[\"weights\"] = data[\"weight\"]\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": 
"seaborn/_stats/density.py", "line": 107, "name": "_check_grouping_vars", "kind": "ref", "category": "function", "info": " self._check_grouping_vars(param, grouping_vars, stacklevel=3)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 109, "name": "_fit", "kind": "def", "category": "function", "info": " def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n \"\"\"Fit and return a KDE object.\"\"\"\n # TODO need to handle singular data\n\n fit_kws: dict[str, Any] = {\"bw_method\": self.bw_method}\n if \"weight\" in data:\n fit_kws[\"weights\"] = data[\"weight\"]\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in 
self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 121, "name": "_get_support", "kind": "def", "category": "function", "info": " def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value 
= {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 126, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(data, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 132, "name": "_fit_and_evaluate", "kind": "def", "category": "function", "info": " def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 140, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(data, 
orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 148, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde(support)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 153, "name": "_transform", "kind": "def", "category": "function", "info": " def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 161, "name": "_get_support", "kind": "ref", "category": "function", "info": " support = self._get_support(data, orient)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 167, "name": "_fit_and_evaluate", "kind": "ref", "category": "function", "info": " return self._fit_and_evaluate(data, orient, support)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 168, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 182, "name": "_transform", "kind": "ref", "category": "function", "info": " res = 
self._transform(data, orient, grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 187, "name": "_check_var_list_or_boolean", "kind": "ref", "category": "function", "info": " self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 191, "name": "GroupBy", "kind": "ref", "category": "function", "info": " GroupBy(grid_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 202, "name": "_check_var_list_or_boolean", "kind": "ref", "category": "function", "info": " self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 37, "name": "Perc", "kind": "def", "category": "class", "info": "_percentile\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 61, "name": "_percentile", "kind": "def", "category": "function", "info": " def _percentile(self, data: DataFrame, var: str) -> DataFrame:\n\n k = list(np.linspace(0, 100, self.k)) if isinstance(self.k, int) else self.k\n method = cast(_MethodKind, self.method)\n values = data[var].dropna()\n if Version(np.__version__) < Version(\"1.22.0\"):\n res = np.percentile(values, k, interpolation=method) # type: ignore\n else:\n res = np.percentile(data[var].dropna(), k, method=method)\n return DataFrame({var: res, \"percentile\": k})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n var = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return groupby.apply(data, self._percentile, var)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 66, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(np.__version__) < Version(\"1.22.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 66, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(np.__version__) < Version(\"1.22.0\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 10, "name": "PolyFit", "kind": "def", "category": "class", "info": "_fit_predict\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 21, "name": "_fit_predict", "kind": "def", "category": "function", "info": " def _fit_predict(self, data):\n\n x = data[\"x\"]\n y = data[\"y\"]\n if x.nunique() <= self.order:\n # TODO warn?\n xx = yy = []\n else:\n p = np.polyfit(x, y, self.order)\n xx = np.linspace(x.min(), x.max(), self.gridsize)\n yy = np.polyval(p, xx)\n\n return pd.DataFrame(dict(x=xx, y=yy))\n\n # TODO we should have a way of identifying the method that will be applied\n # and then only define __call__ on a base-class of stats with this pattern\n\n def __call__(self, data, groupby, orient, scales):\n\n return 
(\n groupby\n .apply(data.dropna(subset=[\"x\", \"y\"]), self._fit_predict)\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 25, "name": "nunique", "kind": "ref", "category": "function", "info": " if x.nunique() <= self.order:\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 42, "name": "apply", "kind": "ref", "category": "function", "info": " .apply(data.dropna(subset=[\"x\", \"y\"]), self._fit_predict)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 42, "name": "dropna", "kind": "ref", "category": "function", "info": " .apply(data.dropna(subset=[\"x\", \"y\"]), self._fit_predict)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 47, "name": "OLSFit", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 6, "name": "bootstrap", "kind": "def", "category": "function", "info": "def bootstrap(*args, **kwargs):\n \"\"\"Resample one or more arrays with replacement and store aggregate values.\n\n Positional arguments are a sequence of arrays to bootstrap along the first\n axis and pass to a summary function.\n\n Keyword arguments:\n n_boot : int, default=10000\n Number of iterations\n axis : int, default=None\n Will pass axis to ``func`` as a keyword argument.\n units : array, default=None\n Array of sampling unit IDs. When used the bootstrap resamples units\n and then observations within units instead of individual\n datapoints.\n func : string or callable, default=\"mean\"\n Function to call on the args that are passed in. If string, uses as\n name of function in the numpy namespace. 
If nans are present in the\n data, will try to use nan-aware version of named function.\n seed : Generator | SeedSequence | RandomState | int | None\n Seed for the random number generator; useful if you want\n reproducible resamples.\n\n Returns\n -------\n boot_dist: array\n array of bootstrapped statistic values\n\n \"\"\"\n # Ensure list of arrays are same length\n if len(np.unique(list(map(len, args)))) > 1:\n raise ValueError(\"All input arrays must have the same length\")\n n = len(args[0])\n\n # Default keyword arguments\n n_boot = kwargs.get(\"n_boot\", 10000)\n func = kwargs.get(\"func\", \"mean\")\n axis = kwargs.get(\"axis\", None)\n units = kwargs.get(\"units\", None)\n random_seed = kwargs.get(\"random_seed\", None)\n if random_seed is not None:\n msg = \"`random_seed` has been renamed to `seed` and will be removed\"\n warnings.warn(msg)\n seed = kwargs.get(\"seed\", random_seed)\n if axis is None:\n func_kwargs = dict()\n else:\n func_kwargs = dict(axis=axis)\n\n # Initialize the resampler\n rng = _handle_random_seed(seed)\n\n # Coerce to arrays\n args = list(map(np.asarray, args))\n if units is not None:\n units = np.asarray(units)\n\n if isinstance(func, str):\n\n # Allow named numpy functions\n f = getattr(np, func)\n\n # Try to use nan-aware version of function if necessary\n missing_data = np.isnan(np.sum(np.column_stack(args)))\n\n if missing_data and not func.startswith(\"nan\"):\n nanf = getattr(np, f\"nan{func}\", None)\n if nanf is None:\n msg = f\"Data contain nans but no nan-aware version of `{func}` found\"\n warnings.warn(msg, UserWarning)\n else:\n f = nanf\n\n else:\n f = func\n\n # Handle numpy changes\n try:\n integers = rng.integers\n except AttributeError:\n integers = rng.randint\n\n # Do the bootstrap\n if units is not None:\n return _structured_bootstrap(args, n_boot, units, f,\n func_kwargs, integers)\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n sample = [a.take(resampler, axis=0) for a in args]\n boot_dist.append(f(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 56, "name": "_handle_random_seed", "kind": "ref", "category": "function", "info": " rng = _handle_random_seed(seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 90, "name": "_structured_bootstrap", "kind": "ref", "category": "function", "info": " return _structured_bootstrap(args, n_boot, units, f,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 95, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 97, "name": "f", "kind": "ref", "category": "function", "info": " boot_dist.append(f(*sample, **func_kwargs))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 101, "name": "_structured_bootstrap", "kind": "def", "category": "function", "info": "def _structured_bootstrap(args, n_boot, units, func, func_kwargs, integers):\n \"\"\"Resample units instead of 
datapoints.\"\"\"\n unique_units = np.unique(units)\n n_units = len(unique_units)\n\n args = [[a[units == unit] for unit in unique_units] for a in args]\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n_units, n_units, dtype=np.intp)\n sample = [[a[i] for i in resampler] for a in args]\n lengths = map(len, sample[0])\n resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]\n sample = list(map(np.concatenate, sample))\n boot_dist.append(func(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 110, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = integers(0, n_units, n_units, dtype=np.intp)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 113, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 116, "name": "func", "kind": "ref", "category": "function", "info": " boot_dist.append(func(*sample, **func_kwargs))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 120, "name": "_handle_random_seed", "kind": "def", "category": "function", "info": "def _handle_random_seed(seed=None):\n \"\"\"Given a seed in one of many formats, return a random number generator.\n\n Generalizes across the numpy 1.17 changes, preferring newer functionality.\n\n \"\"\"\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n try:\n # General interface for seeding on numpy >= 1.17\n rng = np.random.default_rng(seed)\n except AttributeError:\n # We are on numpy < 1.17, handle options ourselves\n if isinstance(seed, (numbers.Integral, np.integer)):\n rng = np.random.RandomState(seed)\n elif seed is None:\n rng = np.random.RandomState()\n else:\n err = \"{} cannot be used to seed the random number generator\"\n raise ValueError(err.format(seed))\n return rng\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 131, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 135, "name": "RandomState", "kind": "ref", "category": "function", "info": " rng = np.random.RandomState(seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 137, "name": "RandomState", "kind": "ref", "category": "function", "info": " rng = np.random.RandomState()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 26, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 31, 
"name": "_BaseGrid", "kind": "def", "category": "class", "info": "set\tfig\tfigure\tapply\tpipe\tsavefig"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 42, "name": "fig", "kind": "def", "category": "function", "info": " def fig(self):\n \"\"\"DEPRECATED: prefer the `figure` property.\"\"\"\n # Grid.figure is preferred because it matches the Axes attribute name.\n # But as the maintanace burden on having this property is minimal,\n # let's be slow about formally deprecating it. For now just note its deprecation\n # in the docstring; add a warning in version 0.13, and eventually remove it.\n return self._figure\n\n @property\n def figure(self):\n \"\"\"Access the :class:`matplotlib.figure.Figure` object underlying the grid.\"\"\"\n return self._figure\n\n def apply(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n \"\"\"\n func(self, *args, **kwargs)\n return self\n\n def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 55, "name": "apply", "kind": "def", "category": "function", "info": " def apply(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n \"\"\"\n func(self, *args, **kwargs)\n return self\n\n def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. 
Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 67, "name": "func", "kind": "ref", "category": "function", "info": " func(self, *args, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 70, "name": "pipe", "kind": "def", "category": "function", "info": " def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 82, "name": "func", "kind": "ref", "category": "function", "info": " return func(self, *args, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 97, "name": "Grid", "kind": "def", "category": "class", "info": "__init__\ttight_layout\tadd_legend\t_update_legend_data\t_get_palette\tlegend\ttick_params"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 120, "name": "add_legend", "kind": "def", "category": "function", "info": " def add_legend(self, legend_data=None, title=None, label_order=None,\n adjust_subtitles=False, **kwargs):\n \"\"\"Draw a legend, maybe placing it outside axes and resizing the figure.\n\n Parameters\n ----------\n legend_data : dict\n Dictionary mapping label names (or two-element tuples where the\n second element is a label name) to matplotlib artist handles. The\n default reads from ``self._legend_data``.\n title : string\n Title for the legend. The default reads from ``self._hue_var``.\n label_order : list of labels\n The order that the legend entries should appear in. 
The default\n reads from ``self.hue_names``.\n adjust_subtitles : bool\n If True, modify entries with invisible artists to left-align\n the labels and set the font size to that of a title.\n kwargs : key, value pairings\n Other keyword arguments are passed to the underlying legend methods\n on the Figure or Axes object.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n # Find the data for the legend\n if legend_data is None:\n legend_data = self._legend_data\n if label_order is None:\n if self.hue_names is None:\n label_order = list(legend_data.keys())\n else:\n label_order = list(map(utils.to_utf8, self.hue_names))\n\n blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n handles = [legend_data.get(l, blank_handle) for l in label_order]\n title = self._hue_var if title is None else title\n title_size = mpl.rcParams[\"legend.title_fontsize\"]\n\n # Unpack nested labels from a hierarchical legend\n labels = []\n for entry in label_order:\n if isinstance(entry, tuple):\n _, label = entry\n else:\n label = entry\n labels.append(label)\n\n # Set default legend kwargs\n kwargs.setdefault(\"scatterpoints\", 1)\n\n if self._legend_out:\n\n kwargs.setdefault(\"frameon\", False)\n kwargs.setdefault(\"loc\", \"center right\")\n\n # Draw a full-figure legend outside the grid\n figlegend = self._figure.legend(handles, labels, **kwargs)\n\n self._legend = figlegend\n figlegend.set_title(title, prop={\"size\": title_size})\n\n if adjust_subtitles:\n adjust_legend_subtitles(figlegend)\n\n # Draw the plot to set the bounding boxes correctly\n _draw_figure(self._figure)\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n fig_width, fig_height = self._figure.get_size_inches()\n self._figure.set_size_inches(fig_width + legend_width, fig_height)\n\n # Draw the plot again to get the new transformations\n _draw_figure(self._figure)\n\n # Now calculate how much space we need on the right side\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n space_needed = legend_width / (fig_width + legend_width)\n margin = .04 if self._margin_titles else .01\n self._space_needed = margin + space_needed\n right = 1 - self._space_needed\n\n # Place the subplot axes to give space for the legend\n self._figure.subplots_adjust(right=right)\n self._tight_layout_rect[2] = right\n\n else:\n # Draw a legend in the first axis\n ax = self.axes.flat[0]\n kwargs.setdefault(\"loc\", \"best\")\n\n leg = ax.legend(handles, labels, **kwargs)\n leg.set_title(title, prop={\"size\": title_size})\n self._legend = leg\n\n if adjust_subtitles:\n adjust_legend_subtitles(leg)\n\n return self\n\n def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = ax.legend_.legendHandles\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n 
else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 157, "name": "Patch", "kind": "ref", "category": "function", "info": " blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 183, "name": "set_title", "kind": "ref", "category": "function", "info": " figlegend.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 186, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(figlegend)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 189, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 192, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 193, "name": "get_size_inches", "kind": "ref", "category": "function", "info": " fig_width, fig_height = self._figure.get_size_inches()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 194, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " self._figure.set_size_inches(fig_width + legend_width, fig_height)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 197, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 200, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 216, "name": "set_title", "kind": "ref", "category": "function", "info": " leg.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 220, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(leg)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 224, "name": "_update_legend_data", "kind": "def", "category": "function", "info": " def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = ax.legend_.legendHandles\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 232, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in ax.legend_.texts]\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 235, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, labels = ax.get_legend_handles_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 243, "name": "_get_palette", "kind": "def", "category": "function", "info": " def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 246, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(n_colors=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 249, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 254, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 256, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 258, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 263, "name": "color_palette", "kind": "ref", "category": "function", "info": " 
colors = color_palette(color_names, n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 267, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 269, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(colors, n_colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 362, "name": "FacetGrid", "kind": "def", "category": "class", "info": "__init__\tfacet_data\tmap\tmap_dataframe\t_facet_color\t_facet_plot\t_finalize_grid\tfacet_axis\tdespine\tset_axis_labels\tset_xlabels\tset_ylabels\tset_xticklabels\tset_yticklabels\tset_titles\trefline\taxes\tax\taxes_dict\t_inner_axes\t_left_axes\t_not_left_axes\t_bottom_axes\t_not_bottom_axes"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 382, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 384, "name": "_get_palette", "kind": "ref", "category": "function", "info": " colors = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 390, "name": "categorical_order", "kind": "ref", "category": "function", "info": " row_names = categorical_order(data[row], row_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 395, "name": "categorical_order", "kind": "ref", "category": "function", "info": " col_names = categorical_order(data[col], col_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 445, "name": "_disable_autolayout", "kind": "ref", "category": "function", "info": " with _disable_autolayout():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 475, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 481, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 524, "name": "set_titles", "kind": "ref", "category": "function", "info": " self.set_titles()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 528, "name": "despine", "kind": "ref", "category": "function", "info": " self.despine()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 
532, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 533, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 534, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 535, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 539, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 540, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 541, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 542, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 636, "name": "facet_data", "kind": "def", "category": "function", "info": " def facet_data(self):\n \"\"\"Generator for name indices and data subsets for each facet.\n\n Yields\n ------\n (i, j, k), data_ijk : tuple of ints, DataFrame\n The ints provide an index into the {row, col, hue}_names attribute,\n and the dataframe contains a subset of the full data corresponding\n to each facet. The generator yields subsets that correspond with\n the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`\n is None.\n\n \"\"\"\n data = self.data\n\n # Construct masks for the row variable\n if self.row_names:\n row_masks = [data[self._row_var] == n for n in self.row_names]\n else:\n row_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the column variable\n if self.col_names:\n col_masks = [data[self._col_var] == n for n in self.col_names]\n else:\n col_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the hue variable\n if self.hue_names:\n hue_masks = [data[self._hue_var] == n for n in self.hue_names]\n else:\n hue_masks = [np.repeat(True, len(self.data))]\n\n # Here is the main generator loop\n for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),\n enumerate(col_masks),\n enumerate(hue_masks)):\n data_ijk = data[row & col & hue & self._not_na]\n yield (i, j, k), data_ijk\n\n def map(self, func, *args, **kwargs):\n \"\"\"Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. 
It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # How we use the function depends on where it comes from\n func_module = str(getattr(func, \"__module__\", \"\"))\n\n # Check for categorical plots without order information\n if func_module == \"seaborn.categorical\":\n if \"order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n if len(args) == 3 and \"hue_order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`hue_order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not func_module.startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n\n # Get the actual data we are going to plot with\n plot_data = data_ijk[list(args)]\n if self._dropna:\n plot_data = plot_data.dropna()\n plot_args = [v for k, v in plot_data.items()]\n\n # Some matplotlib functions don't handle pandas objects correctly\n if func_module.startswith(\"matplotlib\"):\n plot_args = [v.values for v in plot_args]\n\n # Draw the plot\n self._facet_plot(func, ax, plot_args, kwargs)\n\n # Finalize the annotations and layout\n self._finalize_grid(args[:2])\n\n return self\n\n def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n\n def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n 
return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 719, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 727, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 730, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 738, "name": "to_utf8", "kind": "ref", "category": "function", "info": " kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 743, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 751, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, plot_args, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 754, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(args[:2])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 758, "name": "map_dataframe", "kind": "def", "category": "function", "info": " def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n\n def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n 
return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 791, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 799, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 802, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 814, "name": "dropna", "kind": "ref", "category": "function", "info": " data_ijk = data_ijk.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 818, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, args, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 825, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(axis_labels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 829, "name": "_facet_color", "kind": "def", "category": "function", "info": " def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis 
on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 837, "name": "_facet_plot", "kind": "def", "category": "function", "info": " def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} 
and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 847, "name": "func", "kind": "ref", "category": "function", "info": " func(*plot_args, **plot_kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 850, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 852, "name": "_finalize_grid", "kind": "def", "category": "function", "info": " def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template 
for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 854, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " self.set_axis_labels(*axlabels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 857, "name": "facet_axis", "kind": "def", "category": "function", "info": " def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 871, "name": "despine", "kind": "def", "category": "function", "info": " def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 873, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(self._figure, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 876, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 880, "name": "set_xlabels", "kind": "ref", "category": "function", "info": " self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 883, "name": "set_ylabels", "kind": "ref", "category": "function", "info": " self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 887, "name": "set_xlabels", "kind": "def", "category": "function", "info": " def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 892, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 895, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 898, "name": "set_ylabels", "kind": "def", "category": "function", "info": " def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 903, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 906, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(\"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 909, "name": "set_xticklabels", "kind": "def", "category": "function", "info": " def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 912, "name": "get_xticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_xticks()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 913, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(curr_ticks)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 915, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 915, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 917, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = ax.get_xticks()[::step]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 919, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(xticks)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 920, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 922, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(labels, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 925, "name": "set_yticklabels", "kind": "def", "category": "function", "info": " def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 928, "name": "get_yticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_yticks()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 929, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(curr_ticks)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 932, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 934, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(labels, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 937, "name": "set_titles", "kind": "def", "category": "function", "info": " def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 976, "name": "to_utf8", "kind": "ref", "category": "function", "info": " row_template = utils.to_utf8(row_template)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 977, "name": "to_utf8", "kind": "ref", "category": "function", "info": " col_template = utils.to_utf8(col_template)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 978, "name": "to_utf8", "kind": "ref", "category": "function", "info": " template = utils.to_utf8(template)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1005, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[0, j].set_title(title, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1015, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, j].set_title(title, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1020, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, 0].set_title(title, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1026, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes.flat[i].set_title(title, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1029, "name": "refline", "kind": "def", "category": "function", "info": " def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1071, "name": "ax", "kind": "def", "category": "function", "info": " def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1082, "name": "axes_dict", "kind": "def", "category": "function", "info": " def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. 
If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1095, "name": "_inner_axes", "kind": "def", "category": "function", "info": " def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if 
self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1113, "name": "_left_axes", "kind": "def", "category": "function", "info": " def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1125, "name": "_not_left_axes", "kind": "def", "category": "function", "info": " def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat 
array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1137, "name": "_bottom_axes", "kind": "def", "category": "function", "info": " def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1154, "name": "_not_bottom_axes", "kind": "def", "category": "function", "info": " def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1171, "name": "PairGrid", "kind": "def", "category": "class", "info": "__init__\tmap\tmap_lower\tmap_upper\tmap_offdiag\tmap_diag\t_map_diag_iter_hue\t_map_bivariate\t_plot_bivariate\t_plot_bivariate_iter_hue\t_add_axis_labels\t_find_numeric_cols"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1244, "name": "_find_numeric_cols", "kind": "ref", "category": "function", "info": " numeric_cols = self._find_numeric_cols(data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1272, "name": "_disable_autolayout", "kind": "ref", "category": "function", "info": " with _disable_autolayout():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1303, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1320, "name": "categorical_order", "kind": "ref", "category": "function", 
"info": " hue_names = hue_order = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1332, "name": "_get_palette", "kind": "ref", "category": "function", "info": " self.palette = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1339, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1340, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1341, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1342, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1347, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1348, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1349, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1350, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1356, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(fig=fig)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1372, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1376, "name": "map_lower", "kind": "def", "category": "function", "info": " def map_lower(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the lower diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.tril_indices_from(self.axes, -1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n 
ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n 
data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1388, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1391, "name": "map_upper", "kind": "def", "category": "function", "info": " def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. 
This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1403, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1406, "name": "map_offdiag", "kind": 
"def", "category": "function", "info": " def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n 
try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n 
ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1418, "name": "map_lower", "kind": "ref", "category": "function", "info": " self.map_lower(func, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1420, "name": "map_upper", "kind": "ref", "category": "function", "info": " self.map_upper(func, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1427, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1430, "name": "map_diag", "kind": "def", "category": "function", "info": " def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put 
marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in 
self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1453, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " diag_ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1460, "name": "set_visible", "kind": "ref", "category": "function", "info": " tick.tick1line.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1464, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1466, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1472, "name": "share_axis", "kind": "ref", "category": "function", "info": " share_axis(diag_axes[0], ax, \"y\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1478, "name": "_map_diag_iter_hue", "kind": "ref", "category": "function", "info": " return self._map_diag_iter_hue(func, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1506, "name": "func", "kind": "ref", "category": "function", "info": " func(x=vector, **plot_kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1509, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1512, "name": "_map_diag_iter_hue", "kind": "def", "category": "function", "info": " def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = 
hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def 
_find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1518, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data[var].groupby(self.hue_vals)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1530, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1540, "name": "remove_na", "kind": "ref", "category": "function", "info": " data_k = utils.remove_na(data_k)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1543, "name": "func", "kind": "ref", "category": "function", "info": " func(x=data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1545, "name": "func", "kind": "ref", "category": "function", "info": " func(data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1547, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1551, "name": "_map_bivariate", "kind": "def", "category": "function", "info": " def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. 
we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1567, "name": "_plot_bivariate", "kind": "ref", "category": "function", "info": " self._plot_bivariate(x_var, y_var, ax, func, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1568, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1573, "name": "_plot_bivariate", "kind": "def", "category": "function", "info": " def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n 
if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1576, "name": "_plot_bivariate_iter_hue", "kind": "ref", "category": "function", "info": " self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1595, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1608, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1610, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1612, "name": 
"_plot_bivariate_iter_hue", "kind": "def", "category": "function", "info": " def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1625, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data.groupby(self.hue_vals)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1632, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1638, "name": "dropna", "kind": "ref", "category": "function", "info": " data_k = data_k[axes_vars].dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1650, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1652, "name": "func", "kind": "ref", "category": "function", "info": " func(x, y, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1654, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1656, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n 
ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1659, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1661, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1663, "name": "_find_numeric_cols", "kind": "def", "category": "function", "info": " def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1667, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1672, "name": "JointGrid", "kind": "def", "category": "class", "info": "__init__\t_inject_kwargs\tplot\tplot_joint\tplot_marginals\trefline\tset_axis_labels"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1692, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_joint = f.add_subplot(gs[1:, :-1])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1693, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1694, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1702, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1703, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1704, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1705, "name": "get_yticklabels", "kind": 
"ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1709, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1710, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1711, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1712, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1713, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1714, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1715, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1716, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1721, "name": "VectorPlotter", "kind": "ref", "category": "function", "info": " p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1726, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1728, "name": "get_var", "kind": "def", "category": "function", "info": " def get_var(var):\n vector = plot_data.get(var, None)\n if vector is not None:\n vector = vector.rename(p.variables.get(var, None))\n return vector\n\n self.x = get_var(\"x\")\n self.y = get_var(\"y\")\n self.hue = get_var(\"hue\")\n\n for axis in \"xy\":\n name = p.variables.get(axis, None)\n if name is not None:\n getattr(ax_joint, f\"set_{axis}label\")(name)\n\n if xlim is not None:\n ax_joint.set_xlim(xlim)\n if ylim is not None:\n 
ax_joint.set_ylim(ylim)\n\n # Store the semantic mapping parameters for axes-level functions\n self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)\n\n # Make the grid look nice\n utils.despine(f)\n if not marginal_ticks:\n utils.despine(ax=ax_marg_x, left=True)\n utils.despine(ax=ax_marg_y, bottom=True)\n for axes in [ax_marg_x, ax_marg_y]:\n for axis in [axes.xaxis, axes.yaxis]:\n axis.label.set_visible(False)\n f.tight_layout()\n f.subplots_adjust(hspace=space, wspace=space)\n\n def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1731, "name": "rename", "kind": "ref", "category": 
"function", "info": " vector = vector.rename(p.variables.get(var, None))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1734, "name": "get_var", "kind": "ref", "category": "function", "info": " self.x = get_var(\"x\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1735, "name": "get_var", "kind": "ref", "category": "function", "info": " self.y = get_var(\"y\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1736, "name": "get_var", "kind": "ref", "category": "function", "info": " self.hue = get_var(\"hue\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1744, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax_joint.set_xlim(xlim)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1746, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax_joint.set_ylim(ylim)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1752, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(f)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1754, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_x, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1755, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_y, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1758, "name": "set_visible", "kind": "ref", "category": "function", "info": " axis.label.set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1762, "name": "_inject_kwargs", "kind": "def", "category": "function", "info": " def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. 
See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1791, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " self.plot_marginals(marginal_func, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1792, "name": "plot_joint", "kind": "ref", "category": "function", "info": " self.plot_joint(joint_func, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1795, "name": "plot_joint", "kind": "def", "category": "function", "info": " def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. 
Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1822, "name": "_inject_kwargs", "kind": "ref", "category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1825, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, y=self.y, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1827, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, self.y, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1831, "name": "plot_marginals", "kind": "def", "category": "function", "info": " def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1861, "name": "_inject_kwargs", "kind": "ref", 
"category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1876, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, ax=self.ax_marg_x, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1879, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, **orient_kw_x, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1882, "name": "func", "kind": "ref", "category": "function", "info": " func(y=self.y, ax=self.ax_marg_y, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1885, "name": "func", "kind": "ref", "category": "function", "info": " func(self.y, **orient_kw_y, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1887, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1887, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1888, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1888, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1892, "name": "refline", "kind": "def", "category": "function", "info": " def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", 
ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1936, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1955, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1956, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2004, "name": "pairplot", "kind": "def", "category": "function", "info": "def pairplot(\n data, *,\n hue=None, hue_order=None, palette=None,\n vars=None, x_vars=None, y_vars=None,\n kind=\"scatter\", diag_kind=\"auto\", markers=None,\n height=2.5, aspect=1, corner=False, dropna=False,\n plot_kws=None, diag_kws=None, grid_kws=None, size=None,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2113, "name": "PairGrid", "kind": "ref", "category": "function", "info": " grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2143, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(histplot, **diag_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2147, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(kdeplot, **diag_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2157, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(scatterplot, **plot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", 
"line": 2160, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(regplot, **plot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2164, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(kdeplot, **plot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2167, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(histplot, **plot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2171, "name": "add_legend", "kind": "ref", "category": "function", "info": " grid.add_legend()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2178, "name": "jointplot", "kind": "def", "category": "function", "info": "def jointplot(\n data=None, *, x=None, y=None, hue=None, kind=\"scatter\",\n height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,\n color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,\n joint_kws=None, marginal_kws=None,\n **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2217, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", plot_kinds, kind)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2230, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color_rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2231, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " colors = [utils.set_hls_values(color_rgb, l=l) # noqa\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2233, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(colors, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2240, "name": "JointGrid", "kind": "ref", "category": "function", "info": " grid = JointGrid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2254, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(scatterplot, **joint_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2264, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(marg_func, **marginal_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2272, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(histplot, **joint_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2287, "name": "histplot", "kind": 
"ref", "category": "function", "info": " histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2288, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2294, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(kdeplot, **joint_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2300, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(kdeplot, **marginal_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2304, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " x_bins = min(_freedman_diaconis_bins(grid.x), 50)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2305, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " y_bins = min(_freedman_diaconis_bins(grid.y), 50)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2310, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(plt.hexbin, **joint_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2314, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2320, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2323, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(regplot, **joint_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2328, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(residplot, **joint_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2330, "name": "get_offsets", "kind": "ref", "category": "function", "info": " x, y = grid.ax_joint.collections[0].get_offsets().T\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2332, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2333, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(y=y, hue=hue, ax=grid.ax_marg_y, 
**marginal_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 43, "name": "_CategoricalPlotterNew", "kind": "def", "category": "class", "info": "__init__\t_hue_backcompat\t_palette_without_hue_backcompat\tcat_axis\t_get_gray\t_adjust_cat_axis\t_native_width\t_nested_offsets\tplot_strips\tplot_swarms"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 77, "name": "rename", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.rename(columns={\"x\": \"y\", \"y\": \"x\"})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 95, "name": "infer_orient", "kind": "ref", "category": "function", "info": " self.orient = infer_orient(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 117, "name": "categorical_order", "kind": "ref", "category": "function", "info": " cat_levels = categorical_order(self.plot_data[self.cat_axis], order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 120, "name": "_hue_backcompat", "kind": "def", "category": "function", "info": " def _hue_backcompat(self, color, palette, hue_order, force_hue=False):\n \"\"\"Implement backwards compatibility for hue parametrization.\n\n Note: the force_hue parameter is used so that functions can be shown to\n pass existing tests during refactoring and then tested for new behavior.\n It can be removed after completion of the work.\n\n \"\"\"\n # The original categorical functions applied a palette to the categorical axis\n # by default. We want to require an explicit hue mapping, to be more consistent\n # with how things work elsewhere now. I don't think there's any good way to\n # do this gently -- because it's triggered by the default value of hue=None,\n # users would always get a warning, unless we introduce some sentinel \"default\"\n # argument for this change. 
That's possible, but asking users to set `hue=None`\n # on every call is annoying.\n # We are keeping the logic for implementing the old behavior in with the current\n # system so that (a) we can punt on that decision and (b) we can ensure that\n # refactored code passes old tests.\n default_behavior = color is None or palette is not None\n if force_hue and \"hue\" not in self.variables and default_behavior:\n self._redundant_hue = True\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables[self.cat_axis]\n self.var_types[\"hue\"] = \"categorical\"\n hue_order = self.var_levels[self.cat_axis]\n\n # Because we convert the categorical axis variable to string,\n # we need to update a dictionary palette too\n if isinstance(palette, dict):\n palette = {str(k): v for k, v in palette.items()}\n\n else:\n self._redundant_hue = False\n\n # Previously, categorical plots had a trick where color= could seed the palette.\n # Because that's an explicit parameterization, we are going to give it one\n # release cycle with a warning before removing.\n if \"hue\" in self.variables and palette is None and color is not None:\n if not isinstance(color, str):\n color = mpl.colors.to_hex(color)\n palette = f\"dark:{color}\"\n msg = (\n \"Setting a gradient palette using color= is deprecated and will be \"\n f\"removed in version 0.13. Set `palette='{palette}'` for same effect.\"\n )\n warnings.warn(msg, FutureWarning)\n\n return palette, hue_order\n\n def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = \"Passing `palette` without assigning `hue` is deprecated.\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables.get(self.cat_axis)\n self.var_types[\"hue\"] = self.var_types.get(self.cat_axis)\n hue_order = self.var_levels.get(self.cat_axis)\n return hue_order\n\n @property\n def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n 
if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leaves\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 159, "name": "to_hex", "kind": "ref", "category": "function", "info": " color = mpl.colors.to_hex(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 169, "name": "_palette_without_hue_backcompat", "kind": "def", "category": "function", "info": " def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = \"Passing `palette` without assigning `hue` is deprecated.\"\n warnings.warn(msg, FutureWarning, stacklevel=3)\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.cat_axis]\n self.variables[\"hue\"] = self.variables.get(self.cat_axis)\n self.var_types[\"hue\"] = self.var_types.get(self.cat_axis)\n hue_order = self.var_levels.get(self.cat_axis)\n return hue_order\n\n @property\n def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def 
_adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], 
sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 182, "name": "cat_axis", "kind": "def", "category": "function", "info": " def cat_axis(self):\n return {\"v\": \"x\", \"h\": \"y\"}[self.orient]\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n 
if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 185, "name": "_get_gray", "kind": "def", "category": "function", "info": " def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. 
This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n 
dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 194, "name": "_adjust_cat_axis", "kind": "def", "category": "function", "info": " def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n 
if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 219, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(-.5, n - .5, auto=None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 223, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(n - .5, -.5, auto=None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 226, "name": "_native_width", "kind": "def", "category": "function", "info": " def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n unique_values = np.unique(self.comp_data[self.cat_axis])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to 
produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = 
ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leaves\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 235, "name": "_nested_offsets", "kind": "def", "category": "function", "info": " def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = 
ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 254, "name": "plot_strips", "kind": "def", "category": "function", "info": " def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.cat_axis] + dodge_move + jitter_move\n sub_data[self.cat_axis] = adjusted_data\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n 
points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 264, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 282, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 288, "name": "jitterer", "kind": "ref", "category": "function", "info": " jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 294, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 297, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 301, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 304, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 315, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 316, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 320, "name": "plot_swarms", "kind": "def", "category": "function", "info": " def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.cat_axis]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.cat_axis] = sub_data[self.cat_axis] + dodge_move\n\n for var in \"xy\":\n if self._log_scaled(var):\n sub_data[var] = np.power(10, sub_data[var])\n\n ax = self._get_axes(sub_vars)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.cat_axis].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"h\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.cat_axis] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n\n # Finalize the axes details\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 330, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 340, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 351, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 354, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 358, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 361, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 368, "name": "Beeswarm", "kind": "ref", "category": "function", "info": " beeswarm = Beeswarm(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 376, "name": "beeswarm", "kind": "ref", "category": "function", "info": " beeswarm(points, center)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 380, "name": "get_autoscaley_on", "kind": "ref", "category": "function", "info": " scaley = ax.get_autoscaley_on()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 382, "name": "get_autoscalex_on", "kind": "ref", "category": "function", "info": " scalex = ax.get_autoscalex_on()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 390, "name": "update_datalim", "kind": "ref", "category": "function", "info": " 
{"rel_fname": "seaborn/categorical.py", "line": 392, "name": "autoscale_view", "kind": "ref", "category": "function", "info": "ax.autoscale_view(scalex=scalex, scaley=scaley)"},
{"rel_fname": "seaborn/categorical.py", "line": 398, "name": "_draw_figure", "kind": "ref", "category": "function", "info": "_draw_figure(ax.figure)"},
{"rel_fname": "seaborn/categorical.py", "line": 407, "name": "add_legend_data", "kind": "ref", "category": "function", "info": "self.add_legend_data(ax)"},
{"rel_fname": "seaborn/categorical.py", "line": 408, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": "handles, _ = ax.get_legend_handles_labels()"},
{"rel_fname": "seaborn/categorical.py", "line": 413, "name": "_CategoricalFacetPlotter", "kind": "def", "category": "class", "info": ""},
{"rel_fname": "seaborn/categorical.py", "line": 418, "name": "_CategoricalPlotter", "kind": "def", "category": "class", "info": "establish_variables\t_group_longform\testablish_colors\thue_offsets\tnested_width\tannotate_axes\tadd_legend_data"},
{"rel_fname": "seaborn/categorical.py", "line": 424, "name": "establish_variables", "kind": "def", "category": "function", "info": "def establish_variables(self, x=None, y=None, hue=None, data=None, orient=None, order=None, hue_order=None, units=None):\n \"\"\"Convert input specification into a common representation.\"\"\"\n ..."},
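The establish_variables entry above indexes the routine that normalizes every input style into one representation: with no x/y, a wide-form DataFrame is reduced to its numeric columns (one array per column, column names as group names); otherwise long-form value vectors are split by a categorical grouper. A condensed sketch of that dispatch on toy data, with pandas' is_numeric_dtype standing in for seaborn's internal variable_type check:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0], "label": ["x", "y"]})

    # Wide form: keep only numeric columns, one array per column.
    numeric = [c for c in df if pd.api.types.is_numeric_dtype(df[c])]
    plot_data = [np.asarray(df[c], float) for c in numeric]
    group_names = numeric  # ['a', 'b']

    # Long form: one value vector split by a categorical grouper.
    vals, groups = df["a"], df["label"]
    plot_data_long = [np.asarray(vals[groups == g], float) for g in ["x", "y"]]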
{"rel_fname": "seaborn/categorical.py", "line": 461, "name": "variable_type", "kind": "ref", "category": "function", "info": "if variable_type(data[col]) == \"numeric\":"},
{"rel_fname": "seaborn/categorical.py", "line": 543, "name": "infer_orient", "kind": "ref", "category": "function", "info": "orient = infer_orient("},
{"rel_fname": "seaborn/categorical.py", "line": 590, "name": "categorical_order", "kind": "ref", "category": "function", "info": "group_names = categorical_order(groups, order)"},
{"rel_fname": "seaborn/categorical.py", "line": 593, "name": "_group_longform", "kind": "ref", "category": "function", "info": "plot_data, value_label = self._group_longform(vals, groups,"},
{"rel_fname": "seaborn/categorical.py", "line": 604, "name": "categorical_order", "kind": "ref", "category": "function", "info": "hue_names = categorical_order(hue, hue_order)"},
{"rel_fname": "seaborn/categorical.py", "line": 607, "name": "_group_longform", "kind": "ref", "category": "function", "info": "plot_hues, hue_title = self._group_longform(hue, groups,"},
{"rel_fname": "seaborn/categorical.py", "line": 614, "name": "_group_longform", "kind": "ref", "category": "function", "info": "plot_units, _ = self._group_longform(units, groups,"},
{"rel_fname": "seaborn/categorical.py", "line": 629, "name": "_group_longform", "kind": "def", "category": "function", "info": "def _group_longform(self, vals, grouper, order):\n \"\"\"Group a long-form variable by another with correct order.\"\"\"\n ..."},
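Per the indexed snippet, _group_longform groups a value Series by another Series in a fixed level order and substitutes an empty array for any level with no observations. A small reproduction of that contract on toy data:

    import numpy as np
    import pandas as pd

    vals = pd.Series([1.0, 2.0, 3.0], name="value")
    grouper = pd.Series(["a", "b", "a"])
    order = ["a", "b", "c"]  # level 'c' has no observations

    grouped = vals.groupby(grouper)
    out_data = []
    for g in order:
        try:
            out_data.append(grouped.get_group(g))
        except KeyError:
            out_data.append(np.array([]))  # missing level -> empty array
    # out_data: [values 1.0 and 3.0, value 2.0, an empty array]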
{"rel_fname": "seaborn/categorical.py", "line": 640, "name": "groupby", "kind": "ref", "category": "function", "info": "grouped_vals = vals.groupby(grouper)"},
{"rel_fname": "seaborn/categorical.py", "line": 644, "name": "get_group", "kind": "ref", "category": "function", "info": "g_vals = grouped_vals.get_group(g)"},
{"rel_fname": "seaborn/categorical.py", "line": 654, "name": "establish_colors", "kind": "def", "category": "function", "info": "def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n ..."},
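The establish_colors entry above ends by deriving the gray used to frame boxes and violins: 60% of the HLS lightness of the darkest palette entry, converted to a hex string. A worked example of that arithmetic on a toy two-color palette:

    from colorsys import rgb_to_hls
    import matplotlib as mpl

    rgb_colors = [(0.2, 0.4, 0.6), (0.8, 0.5, 0.2)]  # toy palette

    # HLS lightness of each color: 0.4 and 0.5 here.
    light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]
    lum = min(light_vals) * .6                   # 0.4 * 0.6 = 0.24
    gray = mpl.colors.rgb2hex((lum, lum, lum))   # '#3d3d3d'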
{"rel_fname": "seaborn/categorical.py", "line": 665, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": "current_palette = utils.get_color_cycle()"},
{"rel_fname": "seaborn/categorical.py", "line": 667, "name": "color_palette", "kind": "ref", "category": "function", "info": "colors = color_palette(n_colors=n_colors)"},
{"rel_fname": "seaborn/categorical.py", "line": 669, "name": "husl_palette", "kind": "ref", "category": "function", "info": "colors = husl_palette(n_colors, l=.7)  # noqa"},
{"rel_fname": "seaborn/categorical.py", "line": 680, "name": "light_palette", "kind": "ref", "category": "function", "info": "colors = light_palette(color, n_colors)"},
{"rel_fname": "seaborn/categorical.py", "line": 682, "name": "dark_palette", "kind": "ref", "category": "function", "info": "colors = dark_palette(color, n_colors)"},
{"rel_fname": "seaborn/categorical.py", "line": 695, "name": "color_palette", "kind": "ref", "category": "function", "info": "colors = color_palette(palette, n_colors)"},
{"rel_fname": "seaborn/categorical.py", "line": 699, "name": "color_palette", "kind": "ref", "category": "function", "info": "colors = color_palette(colors, desat=saturation)"},
{"rel_fname": "seaborn/categorical.py", "line": 702, "name": "color_palette", "kind": "ref", "category": "function", "info": "rgb_colors = color_palette(colors)"},
{"rel_fname": "seaborn/categorical.py", "line": 707, "name": "rgb2hex", "kind": "ref", "category": "function", "info": "gray = mpl.colors.rgb2hex((lum, lum, lum))"},
{"rel_fname": "seaborn/categorical.py", "line": 714, "name": "hue_offsets", "kind": "def", "category": "function", "info": "def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n ..."},
{"rel_fname": "seaborn/categorical.py", "line": 727, "name": "nested_width", "kind": "def", "category": "function", "info": "def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n ..."},
{"rel_fname": "seaborn/categorical.py", "line": 735, "name": "annotate_axes", "kind": "def", "category": "function", "info": "def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n ..."},
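The hue_offsets and nested_width entries above index the dodge geometry: when hue nesting is dodged, each hue level gets an evenly spaced slot centered on the group position, and each element is drawn slightly narrower than its slot. A worked example with a hypothetical width of 0.8 and three hue levels:

    import numpy as np

    width, n_levels = .8, 3

    # Evenly spaced slot centers, shifted so they straddle the group position.
    each_width = width / n_levels
    offsets = np.linspace(0, width - each_width, n_levels)
    offsets -= offsets.mean()
    # offsets -> [-0.2667, 0.0, 0.2667]

    # Each dodged element is 98% of its slot width.
    nested_width = width / n_levels * .98  # -> 0.2613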
{"rel_fname": "seaborn/categorical.py", "line": 743, "name": "set_xlabel", "kind": "ref", "category": "function", "info": "ax.set_xlabel(xlabel)"},
{"rel_fname": "seaborn/categorical.py", "line": 745, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax.set_ylabel(ylabel)"},
{"rel_fname": "seaborn/categorical.py", "line": 752, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(np.arange(len(self.plot_data)))"},
{"rel_fname": "seaborn/categorical.py", "line": 753, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": "ax.set_xticklabels(group_names)"},
{"rel_fname": "seaborn/categorical.py", "line": 755, "name": "set_yticks", "kind": "ref", "category": "function", "info": "ax.set_yticks(np.arange(len(self.plot_data)))"},
{"rel_fname": "seaborn/categorical.py", "line": 756, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": "ax.set_yticklabels(group_names)"},
{"rel_fname": "seaborn/categorical.py", "line": 760, "name": "set_xlim", "kind": "ref", "category": "function", "info": "ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)"},
{"rel_fname": "seaborn/categorical.py", "line": 763, "name": "set_ylim", "kind": "ref", "category": "function", "info": "ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)"},
{"rel_fname": "seaborn/categorical.py", "line": 768, "name": "add_legend_data", "kind": "def", "category": "function", "info": "def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n ..."},
{"rel_fname": "seaborn/categorical.py", "line": 775, "name": "add_patch", "kind": "ref", "category": "function", "info": "ax.add_patch(rect)"},
{"rel_fname": "seaborn/categorical.py", "line": 778, "name": "_BoxPlotter", "kind": "def", "category": "class", "info": "__init__\tdraw_boxplot\trestyle_boxplot\tplot"},
{"rel_fname": "seaborn/categorical.py", "line": 784, "name": "establish_variables", "kind": "ref", "category": "function", "info": "self.establish_variables(x, y, hue, data, orient, order, hue_order)"},
{"rel_fname": "seaborn/categorical.py", "line": 785, "name": "establish_colors", "kind": "ref", "category": "function", "info": "self.establish_colors(color, palette, saturation)"},
{"rel_fname": "seaborn/categorical.py", "line": 795, "name": "draw_boxplot", "kind": "def", "category": "function", "info": "def draw_boxplot(self, ax, kws):\n \"\"\"Use matplotlib to draw a boxplot on an Axes.\"\"\"\n ..."},
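The add_legend_data entry above indexes the legend-proxy trick: a zero-size Rectangle patch contributes a handle/label pair to the legend without drawing anything visible on the axes. A minimal self-contained sketch:

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()

    # Zero-width, zero-height patch: invisible, but it carries a label.
    rect = plt.Rectangle((0, 0), 0, 0,
                         linewidth=1, edgecolor=".3",
                         facecolor="C0", label="level A")
    ax.add_patch(rect)
    ax.legend(loc="best")  # shows a "level A" swatch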
{"rel_fname": "seaborn/categorical.py", "line": 813, "name": "remove_na", "kind": "ref", "category": "function", "info": "box_data = np.asarray(remove_na(group_data))"},
{"rel_fname": "seaborn/categorical.py", "line": 826, "name": "restyle_boxplot", "kind": "ref", "category": "function", "info": "self.restyle_boxplot(artist_dict, color, props)"},
{"rel_fname": "seaborn/categorical.py", "line": 834, "name": "add_legend_data", "kind": "ref", "category": "function", "info": "self.add_legend_data(ax, self.colors[j], hue_level)"},
{"rel_fname": "seaborn/categorical.py", "line": 841, "name": "remove_na", "kind": "ref", "category": "function", "info": "box_data = np.asarray(remove_na(group_data[hue_mask]))"},
{"rel_fname": "seaborn/categorical.py", "line": 854, "name": "restyle_boxplot", "kind": "ref", "category": "function", "info": "self.restyle_boxplot(artist_dict, self.colors[j], props)"},
{"rel_fname": "seaborn/categorical.py", "line": 857, "name": "restyle_boxplot", "kind": "def", "category": "function", "info": "def restyle_boxplot(self, artist_dict, color, props):\n \"\"\"Take a drawn matplotlib boxplot and make it look nice.\"\"\"\n ..."},
{"rel_fname": "seaborn/categorical.py", "line": 887, "name": "draw_boxplot", "kind": "ref", "category": "function", "info": "self.draw_boxplot(ax, boxplot_kws)"},
{"rel_fname": "seaborn/categorical.py", "line": 888, "name": "annotate_axes", "kind": "ref", "category": "function", "info": "self.annotate_axes(ax)"},
{"rel_fname": "seaborn/categorical.py", "line": 890, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": "ax.invert_yaxis()"},
{"rel_fname": "seaborn/categorical.py", "line": 893, "name": "_ViolinPlotter", "kind": "def", "category": "class", "info": "__init__\testimate_densities\tfit_kde\tkde_support\tscale_area\tscale_width\tscale_count\tdwidth\tdraw_violins\tdraw_single_observation\tdraw_box_lines\tdraw_quartiles\tdraw_points\tdraw_stick_lines\tdraw_to_density\tplot"},
{"rel_fname": "seaborn/categorical.py", "line": 900, "name": "establish_variables", "kind": "ref", "category": "function", "info": "self.establish_variables(x, y, hue, data, orient, order, hue_order)"},
{"rel_fname": "seaborn/categorical.py", "line": 901, "name": "establish_colors", "kind": "ref", "category": "function", "info": "self.establish_colors(color, palette, saturation)"},
{"rel_fname": "seaborn/categorical.py", "line": 902, "name": "estimate_densities", "kind": "ref", "category": "function", "info": "self.estimate_densities(bw, cut, scale, scale_hue, gridsize)"},
{"rel_fname": "seaborn/categorical.py", "line": 926, "name": "estimate_densities", "kind": "def", "category": "function", "info": "def estimate_densities(self, bw, cut, scale, scale_hue, gridsize):\n \"\"\"Find the support and density for all of the data.\"\"\"\n ..."},
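The draw_boxplot entry above indexes the nested drawing loop: each hue level is a separate matplotlib boxplot placed at the group index plus its dodge offset, with the narrower nested width. A minimal sketch of that placement with hypothetical offsets:

    import matplotlib.pyplot as plt
    import numpy as np

    rng = np.random.default_rng(0)
    fig, ax = plt.subplots()

    offsets, nested_width = [-.2, .2], .38  # from the hue_offsets arithmetic
    for i in range(2):                      # category positions
        for off in offsets:                 # dodged hue slots
            ax.boxplot(rng.normal(i, 1, 50), positions=[i + off],
                       widths=nested_width, patch_artist=True)

    ax.set_xticks([0, 1])                   # restore category ticks
    ax.set_xticklabels(["group 0", "group 1"])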
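The estimate_densities entry above indexes the violin pipeline: fit a Gaussian KDE per group, convert the KDE's scale factor into a bandwidth in data units, evaluate the density on a support grid that extends `cut` bandwidths past the data, then rescale the curves according to the `scale` argument. A condensed sketch of those steps on toy data, using scipy's gaussian_kde as the indexed source does:

    import numpy as np
    from scipy.stats import gaussian_kde

    x = np.random.default_rng(0).normal(size=200)

    # kde.factor is a scale factor; the kernel bandwidth in data units
    # is factor times the unbiased standard deviation of the sample.
    kde = gaussian_kde(x, "scott")
    bw_used = kde.factor * x.std(ddof=1)

    # Support grid extends `cut` bandwidths beyond the observed range.
    cut, gridsize = 2, 100
    support = np.linspace(x.min() - bw_used * cut,
                          x.max() + bw_used * cut, gridsize)
    density = kde.evaluate(support)

    # scale="width": every curve peaks at 1.
    density_width = density / density.max()

    # scale="count": the peak is proportional to the group's sample size
    # (toy counts; this group has 50 of a maximum of 200 observations).
    counts = np.array([200, 50])
    density_count = density / density.max() * (counts[1] / counts.max())

    # scale="area": divide every curve by one shared normalizer (the max
    # over all groups) so relative areas between curves are preserved.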
{"rel_fname": "seaborn/categorical.py", "line": 949, "name": "remove_na", "kind": "ref", "category": "function", "info": "kde_data = remove_na(group_data)"},
{"rel_fname": "seaborn/categorical.py", "line": 968, "name": "fit_kde", "kind": "ref", "category": "function", "info": "kde, bw_used = self.fit_kde(kde_data, bw)"},
{"rel_fname": "seaborn/categorical.py", "line": 971, "name": "kde_support", "kind": "ref", "category": "function", "info": "support_i = self.kde_support(kde_data, bw_used, cut, gridsize)"},
{"rel_fname": "seaborn/categorical.py", "line": 998, "name": "remove_na", "kind": "ref", "category": "function", "info": "kde_data = remove_na(group_data[hue_mask])"},
{"rel_fname": "seaborn/categorical.py", "line": 1017, "name": "fit_kde", "kind": "ref", "category": "function", "info": "kde, bw_used = self.fit_kde(kde_data, bw)"},
{"rel_fname": "seaborn/categorical.py", "line": 1020, "name": "kde_support", "kind": "ref", "category": "function", "info": "support_ij = self.kde_support(kde_data, bw_used,"},
{"rel_fname": "seaborn/categorical.py", "line": 1036, "name": "scale_area", "kind": "ref", "category": "function", "info": "self.scale_area(density, max_density, scale_hue)"},
{"rel_fname": "seaborn/categorical.py", "line": 1039, "name": "scale_width", "kind": "ref", "category": "function", "info": "self.scale_width(density)"},
{"rel_fname": "seaborn/categorical.py", "line": 1042, "name": "scale_count", "kind": "ref", "category": "function", "info": "self.scale_count(density, counts, scale_hue)"},
{"rel_fname": "seaborn/categorical.py", "line": 1051, "name": "fit_kde", "kind": "def", "category": "function", "info": " def fit_kde(self, x, bw):\n
enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested 
violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, 
val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1066, "name": "kde_support", "kind": "def", "category": "function", "info": " def kde_support(self, x, bw, cut, gridsize):\n \"\"\"Define a grid of support for the violin.\"\"\"\n support_min = x.min() - bw * cut\n support_max = x.max() + bw * cut\n return np.linspace(support_min, support_max, gridsize)\n\n def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single 
observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = 
remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n 
ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1072, "name": "scale_area", "kind": "def", "category": "function", "info": " def scale_area(self, density, max_density, scale_hue):\n \"\"\"Scale the relative area under the KDE curve.\n\n This essentially preserves the \"standard\" KDE scaling, but the\n resulting maximum density will be 1 so that the curve can be\n properly multiplied by the violin width.\n\n \"\"\"\n if self.hue_names is None:\n for d in density:\n if d.size > 1:\n d /= max_density.max()\n else:\n for i, group in enumerate(density):\n for d in group:\n if scale_hue:\n max = max_density[i].max()\n else:\n max = max_density.max()\n if d.size > 1:\n d /= max\n\n def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, 
i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, 
at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1094, "name": "scale_width", "kind": "def", "category": "function", "info": " def scale_width(self, density):\n \"\"\"Scale each density curve to the same height.\"\"\"\n if self.hue_names is None:\n for d in density:\n d /= d.max()\n else:\n for group in density:\n for d in group:\n d /= d.max()\n\n def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single 
split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n 
ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1104, "name": "scale_count", "kind": "def", "category": "function", "info": " def scale_count(self, density, counts, scale_hue):\n \"\"\"Scale each density curve by the number of observations.\"\"\"\n if self.hue_names is None:\n if counts.max() == 0:\n d = 0\n else:\n for count, d in zip(counts, density):\n d /= d.max()\n d *= count / counts.max()\n else:\n for i, group in enumerate(density):\n for j, d in enumerate(group):\n if counts[i].max() == 0:\n d = 0\n else:\n count = counts[i, j]\n if scale_hue:\n scaler = count / counts[i].max()\n else:\n scaler = count / counts.max()\n d /= d.max()\n d *= scaler\n\n @property\n def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, 
linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n 
else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * 
.5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1128, "name": "dwidth", "kind": "def", "category": "function", "info": " def dwidth(self):\n\n if self.hue_names is None or not self.dodge:\n return self.width / 2\n elif self.split:\n return self.width / 2\n else:\n return self.width / (2 * len(self.hue_names))\n\n def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = 
density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n 
zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1137, "name": "draw_violins", "kind": "def", "category": "function", "info": " def draw_violins(self, ax):\n \"\"\"Draw the violins onto `ax`.\"\"\"\n fill_func = ax.fill_betweenx if self.orient == \"v\" else ax.fill_between\n for i, group_data in enumerate(self.plot_data):\n\n kws = dict(edgecolor=self.gray, linewidth=self.linewidth)\n\n # Option 1: we have a single level of grouping\n # --------------------------------------------\n\n if self.plot_hues is None:\n\n support, density = self.support[i], self.density[i]\n\n # Handle special case of no observations in this bin\n if support.size == 0:\n continue\n\n # Handle special case of a single observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n self.draw_single_observation(ax, i, val, d)\n continue\n\n # Draw the violin for this group\n grid = np.ones(self.gridsize) * i\n 
fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n facecolor=self.colors[i],\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data, support, density, i)\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data, support, density, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2: we have nested grouping by a hue variable\n # ---------------------------------------------------\n\n else:\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n support, density = self.support[i][j], self.density[i][j]\n kws[\"facecolor\"] = self.colors[j]\n\n # Add legend data, but just for one set of violins\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle the special case where we have no observations\n if support.size == 0:\n continue\n\n # Handle the special case where we have one observation\n elif support.size == 1:\n val = support.item()\n d = density.item()\n if self.split:\n d = d / 2\n at_group = i + offsets[j]\n self.draw_single_observation(ax, at_group, val, d)\n continue\n\n # Option 2a: we are drawing a single split violin\n # -----------------------------------------------\n\n if self.split:\n\n grid = np.ones(self.gridsize) * i\n if j:\n fill_func(support,\n grid,\n grid + density * self.dwidth,\n **kws)\n else:\n fill_func(support,\n grid - density * self.dwidth,\n grid,\n **kws)\n\n # Draw the interior representation of the data\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw quartile lines\n if self.inner.startswith(\"quart\"):\n self.draw_quartiles(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density, i,\n [\"left\", \"right\"][j])\n\n # The box and point interior plots are drawn for\n # all data at the group level, so we just do that once\n if j and any(self.plot_hues[0] == hue_level):\n continue\n\n # Get the whole vector for this group level\n violin_data = remove_na(group_data)\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i)\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i)\n\n # Option 2b: we are drawing full nested violins\n # -----------------------------------------------\n\n else:\n grid = np.ones(self.gridsize) * (i + offsets[j])\n fill_func(support,\n grid - density * self.dwidth,\n grid + density * self.dwidth,\n **kws)\n\n # Draw the interior representation\n if self.inner is None:\n continue\n\n # Get a nan-free vector of datapoints\n hue_mask = self.plot_hues[i] == hue_level\n violin_data = remove_na(group_data[hue_mask])\n\n # Draw box and whisker information\n if self.inner.startswith(\"box\"):\n self.draw_box_lines(ax, violin_data, i + offsets[j])\n\n # Draw quartile lines\n elif self.inner.startswith(\"quart\"):\n 
self.draw_quartiles(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw stick observations\n elif self.inner.startswith(\"stick\"):\n self.draw_stick_lines(ax, violin_data,\n support, density,\n i + offsets[j])\n\n # Draw point observations\n elif self.inner.startswith(\"point\"):\n self.draw_points(ax, violin_data, i + offsets[j])\n\n def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == 
\"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1157, "name": "item", "kind": "ref", "category": "function", "info": " val = support.item()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1158, "name": "item", "kind": "ref", "category": "function", "info": " d = density.item()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1159, "name": "draw_single_observation", "kind": "ref", "category": "function", "info": " self.draw_single_observation(ax, i, val, d)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1164, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1175, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1179, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1183, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data, support, density, i)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1187, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data, support, density, i)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1191, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1205, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1213, "name": "item", "kind": "ref", "category": "function", "info": " val = support.item()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1214, "name": "item", "kind": "ref", "category": "function", "info": " d = density.item()\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1218, "name": "draw_single_observation", "kind": "ref", "category": "function", "info": " self.draw_single_observation(ax, at_group, val, d)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1228, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1233, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1244, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1248, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1254, "name": "draw_stick_lines", "kind": "ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1264, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1268, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1272, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1279, "name": "fill_func", "kind": "ref", "category": "function", "info": " fill_func(support,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1290, "name": "remove_na", "kind": "ref", "category": "function", "info": " violin_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1294, "name": "draw_box_lines", "kind": "ref", "category": "function", "info": " self.draw_box_lines(ax, violin_data, i + offsets[j])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1298, "name": "draw_quartiles", "kind": "ref", "category": "function", "info": " self.draw_quartiles(ax, violin_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1304, "name": "draw_stick_lines", "kind": 
"ref", "category": "function", "info": " self.draw_stick_lines(ax, violin_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1310, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax, violin_data, i + offsets[j])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1312, "name": "draw_single_observation", "kind": "def", "category": "function", "info": " def draw_single_observation(self, ax, at_group, at_quant, density):\n \"\"\"Draw a line to mark a single observation.\"\"\"\n d_width = density * self.dwidth\n if self.orient == \"v\":\n ax.plot([at_group - d_width, at_group + d_width],\n [at_quant, at_quant],\n color=self.gray,\n linewidth=self.linewidth)\n else:\n ax.plot([at_quant, at_quant],\n [at_group - d_width, at_group + d_width],\n color=self.gray,\n linewidth=self.linewidth)\n\n def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n 
ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1326, "name": "draw_box_lines", "kind": "def", "category": "function", "info": " def draw_box_lines(self, ax, data, center):\n \"\"\"Draw boxplot information at center of the density.\"\"\"\n # Compute the boxplot statistics\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n whisker_lim = 1.5 * (q75 - q25)\n h1 = np.min(data[data >= (q25 - whisker_lim)])\n h2 = np.max(data[data <= (q75 + whisker_lim)])\n\n # Draw a boxplot using lines and a point\n if self.orient == \"v\":\n ax.plot([center, center], [h1, h2],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([center, center], [q25, q75],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(center, q50,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n else:\n ax.plot([h1, h2], [center, center],\n linewidth=self.linewidth,\n color=self.gray)\n ax.plot([q25, q75], [center, center],\n linewidth=self.linewidth * 3,\n color=self.gray)\n ax.scatter(q50, center,\n zorder=3,\n color=\"white\",\n edgecolor=self.gray,\n s=np.square(self.linewidth * 2))\n\n def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], 
**kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1360, "name": "draw_quartiles", "kind": "def", "category": "function", "info": " def draw_quartiles(self, ax, data, support, density, center, split=False):\n \"\"\"Draw the quartiles as lines at width of density.\"\"\"\n q25, q50, q75 = np.percentile(data, [25, 50, 75])\n\n self.draw_to_density(ax, center, q25, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n self.draw_to_density(ax, center, q50, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 3] * 2)\n self.draw_to_density(ax, center, q75, support, density, split,\n linewidth=self.linewidth,\n dashes=[self.linewidth * 1.5] * 2)\n\n def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1364, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, q25, support, density, split,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1367, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, q50, support, density, split,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1370, "name": "draw_to_density", "kind": "ref", "category": 
"function", "info": " self.draw_to_density(ax, center, q75, support, density, split,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1374, "name": "draw_points", "kind": "def", "category": "function", "info": " def draw_points(self, ax, data, center):\n \"\"\"Draw individual observations as points at middle of the violin.\"\"\"\n kws = dict(s=np.square(self.linewidth * 2),\n color=self.gray,\n edgecolor=self.gray)\n\n grid = np.ones(len(data)) * center\n\n if self.orient == \"v\":\n ax.scatter(grid, data, **kws)\n else:\n ax.scatter(data, grid, **kws)\n\n def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1387, "name": "draw_stick_lines", "kind": "def", "category": "function", "info": " def draw_stick_lines(self, ax, data, support, density,\n center, split=False):\n \"\"\"Draw individual observations as sticks at width of density.\"\"\"\n for val in data:\n self.draw_to_density(ax, center, val, support, density, split,\n linewidth=self.linewidth * .5)\n\n def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1391, "name": "draw_to_density", "kind": "ref", "category": "function", "info": " self.draw_to_density(ax, center, val, 
support, density, split,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1394, "name": "draw_to_density", "kind": "def", "category": "function", "info": " def draw_to_density(self, ax, center, val, support, density, split, **kws):\n \"\"\"Draw a line orthogonal to the value axis at width of density.\"\"\"\n idx = np.argmin(np.abs(support - val))\n width = self.dwidth * density[idx] * .99\n\n kws[\"color\"] = self.gray\n\n if self.orient == \"v\":\n if split == \"left\":\n ax.plot([center - width, center], [val, val], **kws)\n elif split == \"right\":\n ax.plot([center, center + width], [val, val], **kws)\n else:\n ax.plot([center - width, center + width], [val, val], **kws)\n else:\n if split == \"left\":\n ax.plot([val, val], [center - width, center], **kws)\n elif split == \"right\":\n ax.plot([val, val], [center, center + width], **kws)\n else:\n ax.plot([val, val], [center - width, center + width], **kws)\n\n def plot(self, ax):\n \"\"\"Make the violin plot.\"\"\"\n self.draw_violins(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1418, "name": "draw_violins", "kind": "ref", "category": "function", "info": " self.draw_violins(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1419, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1421, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1424, "name": "_CategoricalStatPlotter", "kind": "def", "category": "class", "info": "nested_width\testimate_statistic\tdraw_confints"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1429, "name": "nested_width", "kind": "def", "category": "function", "info": " def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", 
title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1437, "name": "estimate_statistic", "kind": "def", "category": "function", "info": " def estimate_statistic(self, estimator, errorbar, n_boot, seed):\n\n if self.hue_names is None:\n statistic = []\n confint = []\n else:\n statistic = [[] for _ in self.plot_data]\n confint = [[] for _ in self.plot_data]\n\n var = {\"v\": \"y\", \"h\": \"x\"}[self.orient]\n\n agg = EstimateAggregator(estimator, errorbar, n_boot=n_boot, seed=seed)\n\n for i, group_data in enumerate(self.plot_data):\n\n # Option 1: we have a single layer of grouping\n # --------------------------------------------\n if self.plot_hues is None:\n\n df = pd.DataFrame({var: group_data})\n if self.plot_units is not None:\n df[\"units\"] = self.plot_units[i]\n\n res = agg(df, var)\n\n statistic.append(res[var])\n if errorbar is not None:\n confint.append((res[f\"{var}min\"], res[f\"{var}max\"]))\n\n # Option 2: we are grouping by a hue layer\n # ----------------------------------------\n\n else:\n for hue_level in self.hue_names:\n\n if not self.plot_hues[i].size:\n statistic[i].append(np.nan)\n if errorbar is not None:\n confint[i].append((np.nan, np.nan))\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n df = pd.DataFrame({var: group_data[hue_mask]})\n if self.plot_units is not None:\n df[\"units\"] = self.plot_units[i][hue_mask]\n\n res = agg(df, var)\n\n statistic[i].append(res[var])\n if errorbar is not None:\n confint[i].append((res[f\"{var}min\"], res[f\"{var}max\"]))\n\n # Save the resulting values for plotting\n self.statistic = np.array(statistic)\n self.confint = np.array(confint)\n\n def draw_confints(self, ax, at_group, confint, colors,\n errwidth=None, capsize=None, **kws):\n\n if errwidth is not None:\n kws.setdefault(\"lw\", errwidth)\n else:\n kws.setdefault(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n\n for at, (ci_low, ci_high), color in zip(at_group,\n confint,\n colors):\n if self.orient == \"v\":\n ax.plot([at, at], [ci_low, ci_high], color=color, **kws)\n if capsize is not None:\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_low, ci_low], color=color, **kws)\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_high, ci_high], color=color, **kws)\n else:\n ax.plot([ci_low, ci_high], [at, at], color=color, **kws)\n if capsize is not None:\n ax.plot([ci_low, ci_low],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n ax.plot([ci_high, ci_high],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1448, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " agg = EstimateAggregator(estimator, errorbar, n_boot=n_boot, seed=seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1460, "name": "agg", "kind": "ref", "category": "function", "info": " res = agg(df, var)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 1483, "name": "agg", "kind": "ref", "category": "function", "info": " res = agg(df, var)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1493, "name": "draw_confints", "kind": "def", "category": "function", "info": " def draw_confints(self, ax, at_group, confint, colors,\n errwidth=None, capsize=None, **kws):\n\n if errwidth is not None:\n kws.setdefault(\"lw\", errwidth)\n else:\n kws.setdefault(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n\n for at, (ci_low, ci_high), color in zip(at_group,\n confint,\n colors):\n if self.orient == \"v\":\n ax.plot([at, at], [ci_low, ci_high], color=color, **kws)\n if capsize is not None:\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_low, ci_low], color=color, **kws)\n ax.plot([at - capsize / 2, at + capsize / 2],\n [ci_high, ci_high], color=color, **kws)\n else:\n ax.plot([ci_low, ci_high], [at, at], color=color, **kws)\n if capsize is not None:\n ax.plot([ci_low, ci_low],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n ax.plot([ci_high, ci_high],\n [at - capsize / 2, at + capsize / 2],\n color=color, **kws)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1522, "name": "_BarPlotter", "kind": "def", "category": "class", "info": "__init__\tdraw_bars\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1529, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1531, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1532, "name": "estimate_statistic", "kind": "ref", "category": "function", "info": " self.estimate_statistic(estimator, errorbar, n_boot, seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1541, "name": "draw_bars", "kind": "def", "category": "function", "info": " def draw_bars(self, ax, kws):\n \"\"\"Draw the bars onto `ax`.\"\"\"\n # Get the right matplotlib function depending on the orientation\n barfunc = ax.bar if self.orient == \"v\" else ax.barh\n barpos = np.arange(len(self.statistic))\n\n if self.plot_hues is None:\n\n # Draw the bars\n barfunc(barpos, self.statistic, self.width,\n color=self.colors, align=\"center\", **kws)\n\n # Draw the confidence intervals\n errcolors = [self.errcolor] * len(barpos)\n self.draw_confints(ax,\n barpos,\n self.confint,\n errcolors,\n self.errwidth,\n self.capsize)\n\n else:\n\n for j, hue_level in enumerate(self.hue_names):\n\n # Draw the bars\n offpos = barpos + self.hue_offsets[j]\n barfunc(offpos, self.statistic[:, j], self.nested_width,\n color=self.colors[j], align=\"center\",\n label=hue_level, **kws)\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = self.confint[:, j]\n errcolors = [self.errcolor] * len(offpos)\n self.draw_confints(ax,\n offpos,\n confint,\n errcolors,\n self.errwidth,\n 
self.capsize)\n\n def plot(self, ax, bar_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_bars(ax, bar_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1550, "name": "barfunc", "kind": "ref", "category": "function", "info": " barfunc(barpos, self.statistic, self.width,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1555, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1568, "name": "barfunc", "kind": "ref", "category": "function", "info": " barfunc(offpos, self.statistic[:, j], self.nested_width,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1576, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1585, "name": "draw_bars", "kind": "ref", "category": "function", "info": " self.draw_bars(ax, bar_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1586, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1588, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1591, "name": "_PointPlotter", "kind": "def", "category": "class", "info": "__init__\thue_offsets\tdraw_points\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1600, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1602, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1603, "name": "estimate_statistic", "kind": "ref", "category": "function", "info": " self.estimate_statistic(estimator, errorbar, n_boot, seed)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1607, "name": "color_palette", "kind": "ref", "category": "function", "info": " self.colors = [color_palette()[0]] * len(self.colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1636, "name": "hue_offsets", "kind": "def", "category": "function", "info": " def 
hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"v\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"v\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"v\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1645, "name": "draw_points", "kind": "def", "category": "function", "info": " def draw_points(self, ax):\n \"\"\"Draw the main data components of the plot.\"\"\"\n # Get the center positions on the categorical axis\n pointpos = np.arange(len(self.statistic))\n\n # Get the size of the plot elements\n lw = mpl.rcParams[\"lines.linewidth\"] * 1.8 * self.scale\n mew = lw * .75\n markersize = np.pi * np.square(lw) * 2\n\n if self.plot_hues is None:\n\n # Draw lines joining each estimate point\n if self.join:\n color = self.colors[0]\n ls = self.linestyles[0]\n if self.orient == \"h\":\n ax.plot(self.statistic, pointpos,\n color=color, ls=ls, lw=lw)\n else:\n ax.plot(pointpos, self.statistic,\n color=color, ls=ls, lw=lw)\n\n # Draw the confidence intervals\n self.draw_confints(ax, pointpos, self.confint, self.colors,\n self.errwidth, self.capsize)\n\n # Draw the estimate points\n marker = self.markers[0]\n colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]\n if self.orient == \"h\":\n x, y = self.statistic, pointpos\n else:\n x, y = pointpos, self.statistic\n ax.scatter(x, y,\n linewidth=mew, marker=marker, s=markersize,\n facecolor=colors, edgecolor=colors, label=self.label)\n\n else:\n\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Determine the values to plot for this level\n statistic = self.statistic[:, j]\n\n # Determine the position on the categorical and z axes\n offpos = pointpos + offsets[j]\n z = j + 1\n\n # Draw lines joining each estimate point\n if self.join:\n color = self.colors[j]\n ls = self.linestyles[j]\n if self.orient == \"h\":\n ax.plot(statistic, offpos, 
color=color,\n zorder=z, ls=ls, lw=lw)\n else:\n ax.plot(offpos, statistic, color=color,\n zorder=z, ls=ls, lw=lw)\n\n # Draw the confidence intervals\n if self.confint.size:\n confint = self.confint[:, j]\n errcolors = [self.colors[j]] * len(offpos)\n self.draw_confints(ax, offpos, confint, errcolors,\n self.errwidth, self.capsize,\n zorder=z)\n\n # Draw the estimate points\n n_points = len(remove_na(offpos))\n marker = self.markers[j]\n color = mpl.colors.colorConverter.to_rgb(self.colors[j])\n\n if self.orient == \"h\":\n x, y = statistic, offpos\n else:\n x, y = offpos, statistic\n\n if not len(remove_na(statistic)):\n x = y = [np.nan] * n_points\n\n ax.scatter(x, y, label=hue_level,\n facecolor=color, edgecolor=color,\n linewidth=mew, marker=marker, s=markersize,\n zorder=z)\n\n def plot(self, ax):\n \"\"\"Make the plot.\"\"\"\n self.draw_points(ax)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1669, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax, pointpos, self.confint, self.colors,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1674, "name": "to_rgb", "kind": "ref", "category": "function", "info": " colors = [mpl.colors.colorConverter.to_rgb(c) for c in self.colors]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1710, "name": "draw_confints", "kind": "ref", "category": "function", "info": " self.draw_confints(ax, offpos, confint, errcolors,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1715, "name": "remove_na", "kind": "ref", "category": "function", "info": " n_points = len(remove_na(offpos))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1717, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color = mpl.colors.colorConverter.to_rgb(self.colors[j])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1724, "name": "remove_na", "kind": "ref", "category": "function", "info": " if not len(remove_na(statistic)):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1734, "name": "draw_points", "kind": "ref", "category": "function", "info": " self.draw_points(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1735, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1737, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1740, "name": "_CountPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1744, "name": "_LVPlotter", "kind": "def", "category": "class", "info": "__init__\t_lv_box_ends\t_lv_outliers\t_width_functions\t_lvplot\tdraw_letter_value_plot\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1784, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1785, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1787, "name": "_lv_box_ends", "kind": "def", "category": "function", "info": " def _lv_box_ends(self, vals):\n \"\"\"Get the number of data points and calculate `depth` of\n letter-value plot.\"\"\"\n vals = np.asarray(vals)\n # Remove infinite values while handling a 'object' dtype\n # that can come from pd.Float64Dtype() input\n with pd.option_context('mode.use_inf_as_na', True):\n vals = vals[~pd.isnull(vals)]\n n = len(vals)\n p = self.outlier_prop\n\n # Select the depth, i.e. number of boxes to draw, based on the method\n if self.k_depth == 'full':\n # extend boxes to 100% of the data\n k = int(np.log2(n)) + 1\n elif self.k_depth == 'tukey':\n # This results with 5-8 points in each tail\n k = int(np.log2(n)) - 3\n elif self.k_depth == 'proportion':\n k = int(np.log2(n)) - int(np.log2(n * p)) + 1\n elif self.k_depth == 'trustworthy':\n point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n k = int(np.log2(n / point_conf)) + 1\n else:\n k = int(self.k_depth) # allow having k as input\n # If the number happens to be less than 1, set k to 1\n if k < 1:\n k = 1\n\n # Calculate the upper end for each of the k boxes\n upper = [100 * (1 - 0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Calculate the lower end for each of the k boxes\n lower = [100 * (0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Stitch the box ends together\n percentile_ends = [(i, j) for i, j in zip(lower, upper)]\n box_ends = [np.percentile(vals, q) for q in percentile_ends]\n return box_ends, k\n\n def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1808, "name": "_normal_quantile_func", "kind": "ref", "category": "function", "info": " point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1825, "name": "_lv_outliers", "kind": "def", "category": "function", "info": " def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1834, "name": "_width_functions", "kind": "def", "category": "function", "info": " def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1841, "name": "_lvplot", "kind": "def", "category": "function", "info": " def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"v\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1892, "name": "_lv_box_ends", "kind": "ref", "category": "function", "info": " box_ends, k = self._lv_box_ends(box_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1896, "name": "_width_functions", "kind": "ref", "category": "function", "info": " width = self._width_functions(self.scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1899, "name": "height", "kind": "def", "category": "function", "info": " def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = 
self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1903, "name": "vert_perc_box", "kind": "def", "category": "function", "info": " def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = 
Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == 
\"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1906, "name": "height", "kind": "ref", "category": "function", "info": " height(b), fill=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1909, "name": "horz_perc_box", "kind": "def", "category": "function", "info": " def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = 
self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1911, "name": "height", "kind": "ref", "category": "function", "info": " height(b), widths * w,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1916, "name": "width", "kind": "ref", "category": "function", "info": " w_area = np.array([width(height(b), i, k)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1916, "name": "height", "kind": "ref", "category": "function", "info": " w_area = np.array([width(height(b), i, k)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1926, "name": "_lv_outliers", "kind": "ref", "category": "function", "info": " outliers = self._lv_outliers(box_data, k)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1927, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " hex_color = mpl.colors.rgb2hex(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1958, "name": "from_list", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1960, "name": "cmap", "kind": "ref", "category": "function", "info": " rgb = [hex_color, cmap(.85)]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1961, "name": "from_list", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1966, "name": "box_func", "kind": "ref", "category": "function", "info": " boxes = [box_func(x, b[0], i, k, b[1])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1975, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(collection)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1977, "name": "draw_letter_value_plot", "kind": "def", "category": "function", "info": " def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot 
on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1991, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = remove_na(group_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1999, "name": "_lvplot", "kind": "ref", "category": "function", "info": " self._lvplot(box_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2015, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2022, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2030, "name": "_lvplot", "kind": "ref", "category": "function", "info": " self._lvplot(box_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2040, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=self.orient == \"h\", scaley=self.orient == \"v\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2044, "name": "draw_letter_value_plot", "kind": "ref", "category": "function", "info": " self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2045, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2047, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2230, "name": "_BoxPlotter", "kind": "ref", "category": "function", "info": " plotter = _BoxPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2304, "name": "_ViolinPlotter", "kind": "ref", "category": "function", "info": " plotter = _ViolinPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2398, "name": "boxenplot", "kind": "def", "category": "function", "info": "def boxenplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75,\n width=.8, dodge=True, k_depth='tukey', linewidth=None,\n scale='exponential', outlier_prop=0.007, trust_alpha=0.05,\n showfliers=True,\n ax=None, box_kws=None, flier_kws=None, line_kws=None,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2406, "name": "_LVPlotter", "kind": "ref", "category": "function", "info": " plotter = _LVPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2494, "name": "stripplot", "kind": "def", "category": "function", "info": "def stripplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n jitter=True, dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0,\n hue_norm=None, native_scale=False, formatter=None, legend=\"auto\",\n ax=None, **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2502, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2504, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2515, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2517, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 2519, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2520, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2522, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2524, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2536, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2547, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2548, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2616, "name": "swarmplot", "kind": "def", "category": "function", "info": "def swarmplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0, hue_norm=None,\n native_scale=False, formatter=None, legend=\"auto\", warn_thresh=.05,\n ax=None, **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2624, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2626, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2637, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2639, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2644, "name": 
"_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2645, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2647, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2649, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2663, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2671, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2672, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2738, "name": "barplot", "kind": "def", "category": "function", "info": "def barplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, units=None, seed=None,\n orient=None, color=None, palette=None, saturation=.75, width=.8,\n errcolor=\".26\", errwidth=None, capsize=None, dodge=True, ci=\"deprecated\",\n ax=None,\n **kwargs,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2747, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2754, "name": "_BarPlotter", "kind": "ref", "category": "function", "info": " plotter = _BarPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2828, "name": "pointplot", "kind": "def", "category": "function", "info": "def pointplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, units=None, seed=None,\n markers=\"o\", linestyles=\"-\", dodge=False, join=True, scale=1,\n orient=None, color=None, palette=None, errwidth=None, ci=\"deprecated\",\n capsize=None, label=None, ax=None,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", 
"line": 2836, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2838, "name": "_PointPlotter", "kind": "ref", "category": "function", "info": " plotter = _PointPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2918, "name": "countplot", "kind": "def", "category": "function", "info": "def countplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75, width=.8,\n dodge=True, ax=None, **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2942, "name": "_CountPlotter", "kind": "ref", "category": "function", "info": " plotter = _CountPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3002, "name": "catplot", "kind": "def", "category": "function", "info": "def catplot(\n data=None, *, x=None, y=None, hue=None, row=None, col=None,\n col_wrap=None, estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000,\n units=None, seed=None, order=None, hue_order=None, row_order=None,\n col_order=None, height=5, aspect=1, kind=\"strip\", native_scale=False,\n formatter=None, orient=None, color=None, palette=None, hue_norm=None,\n legend=\"auto\", legend_out=True, sharex=True, sharey=True,\n margin_titles=False, facet_kws=None, ci=\"deprecated\",\n **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3030, "name": "_CategoricalFacetPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalFacetPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3032, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalFacetPlotter.get_semantics(locals()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3047, "name": "rename", "kind": "ref", "category": "function", "info": " data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3048, "name": "duplicated", "kind": "ref", "category": "function", "info": " data = data.loc[:, ~data.columns.duplicated()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3056, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3071, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.cat_axis, order=order, formatter=formatter)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3073, "name": 
"_attach", "kind": "ref", "category": "function", "info": " p._attach(g)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3078, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3079, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3080, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3101, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3125, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3135, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.cat_axis)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3137, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3141, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3146, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " g._update_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3150, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3184, "name": "_CategoricalPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotter()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3186, "name": "establish_variables", "kind": "ref", "category": "function", "info": " p.establish_variables(x_, y_, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3210, "name": "establish_colors", "kind": "ref", "category": "function", "info": " p.establish_colors(color, palette, 1)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3233, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3240, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(**facet_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3243, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(plot_func, x=x, y=y, hue=hue, **plot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3246, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.value_label, p.group_label)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3248, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.group_label, p.value_label)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3253, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(x_var=\"count\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3255, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(y_var=\"count\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3259, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3351, "name": "Beeswarm", "kind": "def", "category": "class", "info": "__init__\t__call__\tbeeswarm\tcould_overlap\tposition_candidates\tfirst_non_overlapping_candidate\tadd_gutters"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3379, "name": "transform", "kind": "ref", "category": "function", "info": " orig_xy = ax.transData.transform(orig_xy_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3386, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3389, "name": "item", "kind": "ref", "category": "function", "info": " edge = points.get_linewidth().item()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3399, "name": "beeswarm", "kind": "ref", "category": "function", "info": " new_xyr[sorter] = self.beeswarm(orig_xyr)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3406, "name": "inverted", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3406, "name": "transform", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3413, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_y_data, center, log_scale=log_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3415, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_x_data, center, log_scale=log_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3423, "name": "beeswarm", "kind": "def", "category": "function", "info": " def beeswarm(self, orig_xyr):\n \"\"\"Adjust x position of points to avoid overlaps.\"\"\"\n # In this method, `x` is always the categorical axis\n # Center of the swarm, in point coordinates\n midline = orig_xyr[0, 0]\n\n # Start the swarm with the first point\n swarm = np.atleast_2d(orig_xyr[0])\n\n # Loop over the remaining points\n for xyr_i in orig_xyr[1:]:\n\n # Find the points in the swarm that could possibly\n # overlap with the point we are currently placing\n neighbors = self.could_overlap(xyr_i, swarm)\n\n # Find positions that would be valid individually\n # with respect to each of the swarm neighbors\n candidates = self.position_candidates(xyr_i, neighbors)\n\n # Sort candidates by their centrality\n offsets = np.abs(candidates[:, 0] - midline)\n candidates = candidates[np.argsort(offsets)]\n\n # Find the first candidate that does not overlap any neighbors\n new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n\n # Place it into the swarm\n swarm = np.vstack([swarm, new_xyr_i])\n\n return swarm\n\n def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all 
candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3437, "name": "could_overlap", "kind": "ref", "category": "function", "info": " neighbors = self.could_overlap(xyr_i, swarm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3441, "name": "position_candidates", "kind": "ref", "category": "function", "info": " candidates = self.position_candidates(xyr_i, neighbors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3448, "name": "first_non_overlapping_candidate", "kind": "ref", "category": "function", "info": " new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3455, "name": "could_overlap", "kind": "def", "category": "function", "info": " def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n 
new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3469, "name": "position_candidates", "kind": "def", "category": "function", "info": " def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates 
found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3486, "name": "first_non_overlapping_candidate", "kind": "def", "category": "function", "info": " def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. 
This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3519, "name": "add_gutters", "kind": "def", "category": "function", "info": " def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/cm.py", "rel_fname": "seaborn/cm.py", "line": 1582, "name": "register_colormap", "kind": "ref", "category": "function", "info": " register_colormap(_name, _cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/cm.py", "rel_fname": "seaborn/cm.py", "line": 1583, "name": "register_colormap", "kind": "ref", "category": "function", "info": " register_colormap(_name + \"_r\", _cmap_r)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 84, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 86, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 87, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " dist=DocstringComponents(_dist_params),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 88, "name": "from_function_params", "kind": "ref", "category": "function", "info": " 
kde=DocstringComponents.from_function_params(KDE.__init__),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 89, "name": "from_function_params", "kind": "ref", "category": "function", "info": " hist=DocstringComponents.from_function_params(Histogram.__init__),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 90, "name": "from_function_params", "kind": "ref", "category": "function", "info": " ecdf=DocstringComponents.from_function_params(ECDF.__init__),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 99, "name": "_DistributionPlotter", "kind": "def", "category": "class", "info": "__init__\tunivariate\tdata_variable\thas_xy_data\t_add_legend\t_artist_kws\t_quantile_to_level\t_cmap_from_color\t_default_discrete\t_resolve_multiple\t_compute_univariate_density\tplot_univariate_histogram\tplot_bivariate_histogram\tplot_univariate_density\tplot_bivariate_density\tplot_univariate_ecdf\tplot_rug\t_plot_single_rug"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 115, "name": "univariate", "kind": "def", "category": "function", "info": " def univariate(self):\n \"\"\"Return True if only x or y are used.\"\"\"\n # TODO this could go down to core, but putting it here now.\n # We'd want to be conceptually clear that univariate only applies\n # to x/y and not to other semantics, which can exist.\n # We haven't settled on a good conceptual name for x/y.\n return bool({\"x\", \"y\"} - set(self.variables))\n\n @property\n def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this subset and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 124, "name": "data_variable", "kind": "def", "category": "function", "info": " def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this subset and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
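Under common_norm, each group's density is scaled by part_weight / whole_weight so that the group curves integrate to 1 jointly rather than individually. A sketch under that assumption, with scipy's gaussian_kde standing in for the internal KDE estimator and made-up group data:

import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
groups = {"a": rng.normal(0, 1, 200), "b": rng.normal(3, 1, 50)}
whole_weight = sum(len(obs) for obs in groups.values())

support = np.linspace(-5, 8, 400)
densities = {
    name: gaussian_kde(obs)(support) * len(obs) / whole_weight
    for name, obs in groups.items()
}

# The curves now integrate to ~1 jointly (grid-spacing Riemann sum)
dx = support[1] - support[0]
print(sum(d.sum() for d in densities.values()) * dx)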
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
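The rescaling step above converts a probability density into the histogram's units by multiplying by the total bar area (or by the top bar height for cumulative stats). A sketch with toy numbers:

import numpy as np

heights = np.array([4., 9., 7., 2.])      # bar heights in the chosen stat
widths = np.full(4, 0.5)                  # bin widths
density = np.array([0.1, 0.5, 0.3, 0.1])  # KDE evaluated on some grid

hist_norm = (heights * widths).sum()      # total area under the bars
density_in_hist_units = density * hist_norm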
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
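The linewidth heuristic above needs the thinnest bin's width expressed in points, which it gets by pushing two x positions through ax.transData and converting pixels to points with 72 / dpi. A standalone sketch of that conversion with made-up numbers:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.set_xlim(0, 10)
left_edge, binwidth = 2.0, 0.5

# Transform both endpoints to pixels, take the difference, scale to points
pts_x, pts_y = 72 / fig.dpi * abs(
    ax.transData.transform([left_edge + binwidth] * 2)
    - ax.transData.transform([left_edge] * 2)
)
print(pts_x)  # the bin width expressed in points along x
plt.close(fig)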
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
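Note the heights.T in the pcolormesh call above: a 2D histogram is indexed (x, y), while pcolormesh expects rows to vary with y. A minimal sketch using numpy's histogram2d in place of the internal estimator:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(2)
x, y = rng.normal(size=(2, 500))
heights, x_edges, y_edges = np.histogram2d(x, y, bins=15)

fig, ax = plt.subplots()
mesh = ax.pcolormesh(x_edges, y_edges, heights.T, vmin=0)  # transpose for (y, x)
fig.colorbar(mesh, ax=ax)
plt.close(fig)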
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
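The univariate density drawing above amounts to fill_between from a baseline up to the curve, with sticky_edges pinning autoscale so no margin is added below the band. A sketch with a hand-built Gaussian curve:

import numpy as np
import matplotlib.pyplot as plt

support = np.linspace(-3, 3, 200)
density = np.exp(-support ** 2 / 2) / np.sqrt(2 * np.pi)
baseline = np.zeros_like(density)

fig, ax = plt.subplots()
poly = ax.fill_between(support, baseline, density, alpha=.25)
poly.sticky_edges.y[:] = (0, np.inf)  # no autoscale margin below the curve
ax.autoscale_view()
plt.close(fig)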
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
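The iso-proportion-to-iso-density conversion above is the quantile machinery from _quantile_to_level (its body appears elsewhere in this file). A standalone version of the same computation on a toy density grid:

import numpy as np

def quantile_to_level(data, quantile):
    # Density cut points such that mass above each cut equals each proportion
    isoprop = np.asarray(quantile)
    values = np.ravel(data)
    sorted_values = np.sort(values)[::-1]
    normalized = np.cumsum(sorted_values) / values.sum()
    idx = np.searchsorted(normalized, 1 - isoprop)
    return np.take(sorted_values, idx, mode="clip")

xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-3, 3, 50))
density = np.exp(-(xx ** 2 + yy ** 2) / 2)
density /= density.sum()

levels = np.linspace(0.05, 1, 10)          # like thresh=.05 with 10 levels
print(quantile_to_level(density, levels))  # ascending density cut points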
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
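The ECDF drawing above boils down to sorted values against accumulated (optionally weighted) proportions, stepped with drawstyle="steps-post" for x data. A minimal sketch with toy data; the real estimator also anchors the curve at -inf so it starts from 0, which this simplification approximates by repeating the first value:

import numpy as np
import matplotlib.pyplot as plt

obs = np.array([3., 1., 2., 2.])
weights = np.array([1., 1., 2., 1.])

order = np.argsort(obs)
vals = obs[order]
stat = np.cumsum(weights[order]) / weights.sum()  # weighted proportion <= val

fig, ax = plt.subplots()
ax.plot(np.r_[vals[0], vals], np.r_[0, stat], drawstyle="steps-post")
plt.close(fig)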
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 132, "name": "has_xy_data", "kind": "def", "category": "function", "info": " def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
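The single-rug helper hinges on a blended transform: tick positions live in data coordinates along the plotted axis, while tick length lives in axes coordinates. A self-contained sketch of an x-axis rug built the same way, with toy values:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.transforms as tx
from matplotlib.collections import LineCollection

vector = np.array([1.0, 2.5, 4.0])
height, n = .05, 3

fig, ax = plt.subplots()
# x in data coordinates, y in axes coordinates
trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
xy_pairs = np.column_stack([np.repeat(vector, 2), np.tile([0, height], n)])
ax.add_collection(LineCollection(
    xy_pairs.reshape([n, 2, 2]), transform=trans, linewidth=1
))
ax.autoscale_view(scalex=True, scaley=False)
plt.close(fig)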
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
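# np.power(10, vector) inverts the np.log10 mapping applied to log-scaled\n # axes internally, e.g. np.power(10, np.log10(25.0)) == 25.0, so the rug\n # positions land back in data units before drawing.\n 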
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 137, "name": "_add_legend", "kind": "def", "category": "function", "info": " def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
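# Computed coordinates are stored as log10 values when the axis is log\n # scaled; exponentiating here restores the original data scale before\n # the line segments are built.\n 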
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 146, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 148, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " kws = self._artist_kws(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 158, "name": "artist", "kind": "ref", "category": "function", "info": " handles.append(artist(**kws))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 165, "name": "add_legend", "kind": "ref", "category": "function", "info": " ax_obj.add_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 172, "name": "_artist_kws", "kind": "def", "category": "function", "info": " def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, 
b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", 
\"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = 
[]\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n 
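# Each histogram's index is a MultiIndex of (edges, widths) built above;\n # to_frame() exposes those levels as columns so the thinnest bar across\n # all groups can be located below.\n 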
h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n 
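# \"C0\" names the first entry of the active matplotlib property cycle, so\n # the default follows the current style without consuming the cycle.\n 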
if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n            vector = np.power(10, vector)\n\n        # We'll always add a single collection with varying colors\n        if \"hue\" in self.variables:\n            colors = self._hue_map(sub_data[\"hue\"])\n        else:\n            colors = None\n\n        # Build the array of values for the LineCollection\n        if var == \"x\":\n\n            trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n            xy_pairs = np.column_stack([\n                np.repeat(vector, 2), np.tile([0, height], n)\n            ])\n\n        if var == \"y\":\n\n            trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n            xy_pairs = np.column_stack([\n                np.tile([0, height], n), np.repeat(vector, 2)\n            ])\n\n        # Draw the lines on the plot\n        line_segs = xy_pairs.reshape([n, 2, 2])\n        ax.add_collection(LineCollection(\n            line_segs, transform=trans, colors=colors, **kws\n        ))\n\n        ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 176, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": "        kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 195, "name": "_quantile_to_level", "kind": "def", "category": "function", "info": "    def _quantile_to_level(self, data, quantile):\n        \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n        isoprop = np.asarray(quantile)\n        values = np.ravel(data)\n        sorted_values = np.sort(values)[::-1]\n        normalized_values = np.cumsum(sorted_values) / values.sum()\n        idx = np.searchsorted(normalized_values, 1 - isoprop)\n        levels = np.take(sorted_values, idx, mode=\"clip\")\n        return levels\n\n    def _cmap_from_color(self, color):\n        \"\"\"Return a sequential colormap given a color seed.\"\"\"\n        # Like so much else here, this is broadly useful, but keeping it\n        # in this class to signify that I haven't thought overly hard about it...\n        r, g, b, _ = to_rgba(color)\n        h, s, _ = husl.rgb_to_husl(r, g, b)\n        xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n        ramp = np.zeros((256, 3))\n        ramp[:, 0] = h\n        ramp[:, 1] = s * np.cos(xx)\n        ramp[:, 2] = np.linspace(35, 80, 256)\n        colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n        return mpl.colors.ListedColormap(colors[::-1])\n\n    def _default_discrete(self):\n        \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n        if self.univariate:\n            discrete = self.var_types[self.data_variable] == \"categorical\"\n        else:\n            discrete_x = self.var_types[\"x\"] == \"categorical\"\n            discrete_y = self.var_types[\"y\"] == \"categorical\"\n            discrete = discrete_x, discrete_y\n        return discrete\n\n    def _resolve_multiple(self, curves, multiple):\n        \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n        # Default baselines have all densities starting at 0\n        baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n        # TODO we should have some central clearinghouse for checking if any\n        # \"grouping\" (terminology?) 
semantics have been assigned\n        if \"hue\" not in self.variables:\n            return curves, baselines\n\n        if multiple in (\"stack\", \"fill\"):\n\n            # Setting stack or fill means that the curves share a\n            # support grid / set of bin edges, so we can make a dataframe\n            # Reverse the column order to plot from top to bottom\n            curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n            # Find column groups that are nested within col/row variables\n            column_groups = {}\n            for i, keyd in enumerate(map(dict, curves.columns)):\n                facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n                column_groups.setdefault(facet_key, [])\n                column_groups[facet_key].append(i)\n\n            baselines = curves.copy()\n            for col_idxs in column_groups.values():\n                cols = curves.columns[col_idxs]\n\n                norm_constant = curves[cols].sum(axis=\"columns\")\n\n                # Take the cumulative sum to stack\n                curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n                # Normalize by row sum to fill\n                if multiple == \"fill\":\n                    curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n                # Define where each segment starts\n                baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n        if multiple == \"dodge\":\n\n            # Account for the unique semantic (non-faceting) levels\n            # This will require rethinking if we add other semantics!\n            hue_levels = self.var_levels[\"hue\"]\n            n = len(hue_levels)\n            for key in curves:\n                level = dict(key)[\"hue\"]\n                hist = curves[key].reset_index(name=\"heights\")\n                level_idx = hue_levels.index(level)\n                if self._log_scaled(self.data_variable):\n                    log_min = np.log10(hist[\"edges\"])\n                    log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n                    log_width = (log_max - log_min) / n\n                    new_min = np.power(10, log_min + level_idx * log_width)\n                    new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n                    hist[\"widths\"] = new_max - new_min\n                    hist[\"edges\"] = new_min\n                else:\n                    hist[\"widths\"] /= n\n                    hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n                curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n        return curves, baselines\n\n    # -------------------------------------------------------------------------------- #\n    # Computation\n    # -------------------------------------------------------------------------------- #\n\n    def _compute_univariate_density(\n        self,\n        data_variable,\n        common_norm,\n        common_grid,\n        estimate_kws,\n        log_scale,\n        warn_singular=True,\n    ):\n\n        # Initialize the estimator object\n        estimator = KDE(**estimate_kws)\n\n        if set(self.variables) - {\"x\", \"y\"}:\n            if common_grid:\n                all_observations = self.comp_data.dropna()\n                estimator.define_support(all_observations[data_variable])\n        else:\n            common_norm = False\n\n        all_data = self.plot_data.dropna()\n        if common_norm and \"weights\" in all_data:\n            whole_weight = all_data[\"weights\"].sum()\n        else:\n            whole_weight = len(all_data)\n\n        densities = {}\n\n        for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n            # Extract the data points from this subset and remove nulls\n            observations = sub_data[data_variable]\n\n            # Extract the weights for this subset of observations\n            if \"weights\" in self.variables:\n                weights = sub_data[\"weights\"]\n                part_weight = weights.sum()\n            else:\n                weights = None\n                part_weight = len(sub_data)\n\n            # Estimate the density of observations at this level\n            variance = np.nan_to_num(observations.var())\n            singular = len(observations) < 2 or math.isclose(variance, 0)\n            try:\n                if not singular:\n                    # Convoluted approach needed because numerical failures\n                    # can manifest in a few different ways.\n                    density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n            vector = np.power(10, vector)\n\n        # We'll always add a single collection with varying colors\n        if \"hue\" in self.variables:\n            colors = self._hue_map(sub_data[\"hue\"])\n        else:\n            colors = None\n\n        # Build the array of values for the LineCollection\n        if var == \"x\":\n\n            trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n            xy_pairs = np.column_stack([\n                np.repeat(vector, 2), np.tile([0, height], n)\n            ])\n\n        if var == \"y\":\n\n            trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n            xy_pairs = np.column_stack([\n                np.tile([0, height], n), np.repeat(vector, 2)\n            ])\n\n        # Draw the lines on the plot\n        line_segs = xy_pairs.reshape([n, 2, 2])\n        ax.add_collection(LineCollection(\n            line_segs, transform=trans, colors=colors, **kws\n        ))\n\n        ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 205, "name": "_cmap_from_color", "kind": "def", "category": "function", "info": "    def _cmap_from_color(self, color):\n        \"\"\"Return a sequential colormap given a color seed.\"\"\"\n        # Like so much else here, this is broadly useful, but keeping it\n        # in this class to signify that I haven't thought overly hard about it...\n        r, g, b, _ = to_rgba(color)\n        h, s, _ = husl.rgb_to_husl(r, g, b)\n        xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n        ramp = np.zeros((256, 3))\n        ramp[:, 0] = h\n        ramp[:, 1] = s * np.cos(xx)\n        ramp[:, 2] = np.linspace(35, 80, 256)\n        colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n        return mpl.colors.ListedColormap(colors[::-1])\n\n    def _default_discrete(self):\n        \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n        if self.univariate:\n            discrete = self.var_types[self.data_variable] == \"categorical\"\n        else:\n            discrete_x = self.var_types[\"x\"] == \"categorical\"\n            discrete_y = self.var_types[\"y\"] == \"categorical\"\n            discrete = discrete_x, discrete_y\n        return discrete\n\n    def _resolve_multiple(self, curves, multiple):\n        \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n        # Default baselines have all densities starting at 0\n        baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n        # TODO we should have some central clearinghouse for checking if any\n        # \"grouping\" (terminology?) 
semantics have been assigned\n        if \"hue\" not in self.variables:\n            return curves, baselines\n\n        if multiple in (\"stack\", \"fill\"):\n\n            # Setting stack or fill means that the curves share a\n            # support grid / set of bin edges, so we can make a dataframe\n            # Reverse the column order to plot from top to bottom\n            curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n            # Find column groups that are nested within col/row variables\n            column_groups = {}\n            for i, keyd in enumerate(map(dict, curves.columns)):\n                facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n                column_groups.setdefault(facet_key, [])\n                column_groups[facet_key].append(i)\n\n            baselines = curves.copy()\n            for col_idxs in column_groups.values():\n                cols = curves.columns[col_idxs]\n\n                norm_constant = curves[cols].sum(axis=\"columns\")\n\n                # Take the cumulative sum to stack\n                curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n                # Normalize by row sum to fill\n                if multiple == \"fill\":\n                    curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n                # Define where each segment starts\n                baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n        if multiple == \"dodge\":\n\n            # Account for the unique semantic (non-faceting) levels\n            # This will require rethinking if we add other semantics!\n            hue_levels = self.var_levels[\"hue\"]\n            n = len(hue_levels)\n            for key in curves:\n                level = dict(key)[\"hue\"]\n                hist = curves[key].reset_index(name=\"heights\")\n                level_idx = hue_levels.index(level)\n                if self._log_scaled(self.data_variable):\n                    log_min = np.log10(hist[\"edges\"])\n                    log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n                    log_width = (log_max - log_min) / n\n                    new_min = np.power(10, log_min + level_idx * log_width)\n                    new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n                    hist[\"widths\"] = new_max - new_min\n                    hist[\"edges\"] = new_min\n                else:\n                    hist[\"widths\"] /= n\n                    hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n                curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n        return curves, baselines\n\n    # -------------------------------------------------------------------------------- #\n    # Computation\n    # -------------------------------------------------------------------------------- #\n\n    def _compute_univariate_density(\n        self,\n        data_variable,\n        common_norm,\n        common_grid,\n        estimate_kws,\n        log_scale,\n        warn_singular=True,\n    ):\n\n        # Initialize the estimator object\n        estimator = KDE(**estimate_kws)\n\n        if set(self.variables) - {\"x\", \"y\"}:\n            if common_grid:\n                all_observations = self.comp_data.dropna()\n                estimator.define_support(all_observations[data_variable])\n        else:\n            common_norm = False\n\n        all_data = self.plot_data.dropna()\n        if common_norm and \"weights\" in all_data:\n            whole_weight = all_data[\"weights\"].sum()\n        else:\n            whole_weight = len(all_data)\n\n        densities = {}\n\n        for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n            # Extract the data points from this subset and remove nulls\n            observations = sub_data[data_variable]\n\n            # Extract the weights for this subset of observations\n            if \"weights\" in self.variables:\n                weights = sub_data[\"weights\"]\n                part_weight = weights.sum()\n            else:\n                weights = None\n                part_weight = len(sub_data)\n\n            # Estimate the density of observations at this level\n            variance = np.nan_to_num(observations.var())\n            singular = len(observations) < 2 or math.isclose(variance, 0)\n            try:\n                if not singular:\n                    # Convoluted approach needed because numerical failures\n                    # can manifest in a few different ways.\n                    density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fill_betweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this subset\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 210, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, _ = husl.rgb_to_husl(r, g, b)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 216, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 217, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(colors[::-1])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 219, "name": "_default_discrete", "kind": "def", "category": "function", "info": " def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, 
weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, 
bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = 
np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == 
\"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in 
ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 229, "name": "_resolve_multiple", "kind": "def", "category": "function", "info": " def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # 
Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = 
[]\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n 
h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n 
if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 265, "name": "div", "kind": "ref", "category": "function", "info": " curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 268, "name": "shift", "kind": "ref", "category": "function", "info": " baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 268, "name": "fillna", "kind": "ref", "category": "function", "info": " baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 278, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = curves[key].reset_index(name=\"heights\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 280, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 292, "name": "set_index", "kind": "ref", "category": "function", "info": " curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 300, "name": "_compute_univariate_density", "kind": "def", "category": "function", "info": " def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # 
Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = 
[]\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n 
h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n 
if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 311, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 315, "name": "dropna", "kind": "ref", "category": "function", "info": " all_observations = self.comp_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 316, "name": "define_support", "kind": "ref", "category": "function", "info": " estimator.define_support(all_observations[data_variable])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 320, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 328, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 348, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(observations, weights=weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 378, "name": "plot_univariate_histogram", "kind": "def", "category": "function", "info": " def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` 
cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + 
widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = 
pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't 
follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
[... elided: this "info" string continues with a verbatim repeat of the plot_univariate_density, plot_bivariate_density, plot_univariate_ecdf, and plot_rug source already shown in full above; the string resumes below at the tail of _plot_single_rug ...]
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 401, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 402, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 425, "name": "Hist", "kind": "ref", "category": "function", "info": " estimator = Hist(**estimate_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 429, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 435, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = estimator._define_bin_params(all_data, orient, None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 449, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 450, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 460, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 474, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = estimator._define_bin_params(sub_data, orient, None)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 475, "name": "_normalize", "kind": "ref", "category": "function", "info": " res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 475, "name": "_eval", "kind": "ref", "category": "function", "info": " res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 476, "name": "to_numpy", "kind": "ref", "category": "function", "info": " heights = res[estimator.stat].to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 477, "name": "to_numpy", "kind": "ref", "category": "function", "info": " widths = res[\"space\"].to_numpy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 478, "name": "to_numpy", "kind": "ref", "category": "function", "info": " edges = res[orient].to_numpy() - widths / 2\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 490, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 497, "name": "from_arrays", "kind": "ref", "category": "function", "info": " index = pd.MultiIndex.from_arrays([\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 511, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " histograms, baselines = self._resolve_multiple(histograms, multiple)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 513, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, _ = self._resolve_multiple(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 521, "name": "to_frame", "kind": "ref", "category": "function", "info": " bin_vals = histograms.index.to_frame()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 526, "name": "idxmax", "kind": "ref", "category": "function", "info": " edges.max() + widths.loc[edges.idxmax()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 551, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 554, "name": "rename", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 554, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 557, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 561, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 565, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 574, "name": "plot_func", "kind": "ref", "category": "function", "info": " artists = plot_func(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 670, "name": "to_frame", "kind": "ref", "category": "function", "info": " h.index.to_frame() for _, h in histograms.items()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 671, "name": "reset_index", "kind": "ref", "category": "function", "info": " ]).reset_index(drop=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 672, "name": "idxmin", "kind": "ref", "category": "function", "info": " thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 680, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 682, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 686, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 690, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([left_edge + binwidth] * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 691, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([left_edge] * 2)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 728, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, 
default_y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 739, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 743, "name": "plot_bivariate_histogram", "kind": "def", "category": "function", "info": " def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going 
[... elided: verbatim repeat of the plot_bivariate_histogram finalization and the plot_univariate_density, plot_bivariate_density, plot_univariate_ecdf, and plot_rug source already shown in full above; the string resumes below at the tail of _plot_single_rug ...]
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 757, "name": "Histogram", "kind": "ref", "category": "function", "info": " estimator = Histogram(**estimate_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 761, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 763, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " estimator.define_bin_params(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 774, "name": "iter_data", "kind": "ref", "category": "function", "info": " for _, sub_data in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 775, "name": "estimator", "kind": "ref", "category": "function", "info": " sub_heights, _ = estimator(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 783, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(full_heights, pthresh)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 788, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(full_heights, pmax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 800, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 806, "name": "estimator", "kind": "ref", "category": "function", "info": " heights, (x_edges, y_edges) = estimator(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 813, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 815, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 825, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 826, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 831, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 833, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 838, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(heights, pmax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 844, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(heights, pthresh)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 846, "name": "masked_less_equal", "kind": "ref", "category": "function", "info": " heights = np.ma.masked_less_equal(heights, thresh)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 849, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 853, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 854, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 885, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 896, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 900, "name": "plot_univariate_density", "kind": "def", "category": "function", "info": " def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = 
partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ 
in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and 
legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 922, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " plot_kws = _normalize_kwargs(plot_kws, artist)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 925, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 933, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 936, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 946, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, baselines = self._resolve_multiple(densities, multiple)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 969, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 980, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 983, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 987, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1021, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1031, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1035, "name": "plot_bivariate_density", "kind": "def", "category": "function", "info": " def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if 
self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n 
colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1053, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1058, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1063, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1067, "name": "fillna", "kind": "ref", "category": "function", "info": " min_variance = observations.var().fillna(0).min()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1080, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(*observations, weights=weights)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1097, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1099, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1122, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " common_levels = self._quantile_to_level(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1128, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " k: self._quantile_to_level(d, levels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1144, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1152, "name": 
"color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1157, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1160, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1162, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " contour_kws[\"cmap\"] = self._cmap_from_color(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1166, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1184, "name": "contour_func", "kind": "ref", "category": "function", "info": " cset = contour_func(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1202, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1217, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1221, "name": "plot_univariate_ecdf", "kind": "def", "category": "function", "info": " def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, 
**artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1223, "name": "ECDF", "kind": "ref", "category": "function", "info": " estimator = ECDF(**estimate_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1230, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1240, "name": "estimator", "kind": "ref", "category": "function", "info": " stat, vals = estimator(observations, weights=weights)\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1245, "name": "_hue_map", "kind": "ref", "category": "function", "info": " artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1249, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1267, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1280, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1286, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1290, "name": "plot_rug", "kind": "def", "category": "function", "info": " def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == 
\"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1292, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1294, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1311, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1313, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1316, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1320, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1324, "name": "_plot_single_rug", "kind": "def", "category": "function", "info": " def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1331, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1336, "name": "_hue_map", "kind": "ref", "category": "function", "info": " colors = self._hue_map(sub_data[\"hue\"])\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1357, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(LineCollection(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1361, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1364, "name": "_DistributionFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1373, "name": "histplot", "kind": "def", "category": "function", "info": "def histplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Histogram computation parameters\n stat=\"count\", bins=\"auto\", binwidth=None, binrange=None,\n discrete=None, cumulative=False, common_bins=True, common_norm=True,\n # Histogram appearance parameters\n multiple=\"layer\", element=\"bars\", fill=True, shrink=1,\n # Histogram smoothing with a kernel density estimate\n kde=False, kde_kws=None, line_kws=None,\n # Bivariate histogram parameters\n thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1394, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1396, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1399, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1404, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1411, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1418, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " discrete = p._default_discrete()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1431, "name": 
"plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1449, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1596, "name": "kdeplot", "kind": "def", "category": "function", "info": "def kdeplot(\n data=None, *, x=None, y=None, hue=None, weights=None,\n palette=None, hue_order=None, hue_norm=None, color=None, fill=None,\n multiple=\"layer\", common_norm=True, common_grid=False, cumulative=False,\n bw_method=\"scott\", bw_adjust=1, warn_singular=True, log_scale=None,\n levels=10, thresh=.05, gridsize=200, cut=3, clip=None,\n legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,\n **kwargs,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1684, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1686, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1689, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1694, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, allowed_types=[\"numeric\", \"datetime\"], log_scale=log_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1697, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1716, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1730, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1876, "name": "ecdfplot", "kind": "def", "category": "function", "info": "def ecdfplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Computation parameters\n stat=\"proportion\", complementary=False,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1890, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1892, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1895, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1906, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1909, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1923, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1988, "name": "rugplot", "kind": "def", "category": "function", "info": "def rugplot(\n data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,\n palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2046, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2048, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2050, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2055, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2058, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2063, "name": "plot_rug", "kind": "ref", 
"category": "function", "info": " p.plot_rug(height, expand_margins, legend, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2110, "name": "displot", "kind": "def", "category": "function", "info": "def displot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, row=None, col=None, weights=None,\n # Other plot parameters\n kind=\"hist\", rug=False, rug_kws=None, log_scale=None, legend=True,\n # Hue-mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Faceting parameters\n col_wrap=None, row_order=None, col_order=None,\n height=5, aspect=1, facet_kws=None,\n **kwargs,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2124, "name": "_DistributionFacetPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionFacetPlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2126, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionFacetPlotter.get_semantics(locals())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2129, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2131, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", [\"hist\", \"kde\", \"ecdf\"], kind)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2150, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2151, "name": "duplicated", "kind": "ref", "category": "function", "info": " grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2159, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2172, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(g, allowed_types=allowed_types, log_scale=log_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2192, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2200, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " estimate_kws[\"discrete\"] = p._default_discrete()\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2208, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2209, "name": "plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(**hist_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2213, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2214, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(**hist_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2222, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2233, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2234, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(**kde_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2238, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2239, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(**kde_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2248, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2257, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2258, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(**ecdf_kws)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2269, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(rug_kws, p.plot_rug, rugplot)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2273, "name": "plot_rug", "kind": "ref", "category": "function", "info": " p.plot_rug(**rug_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2277, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2278, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " x_var=p.variables.get(\"x\", g.axes.flat[0].get_xlabel()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2279, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " y_var=p.variables.get(\"y\", g.axes.flat[0].get_ylabel()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2281, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2297, "name": "rename", "kind": "ref", "category": "function", "info": " g.data = p.plot_data.rename(columns=wide_cols)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2389, "name": "_freedman_diaconis_bins", "kind": "def", "category": "function", "info": "def _freedman_diaconis_bins(a):\n \"\"\"Calculate number of hist bins using Freedman-Diaconis rule.\"\"\"\n # From https://stats.stackexchange.com/questions/798/\n a = np.asarray(a)\n if len(a) < 2:\n return 1\n iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n h = 2 * iqr / (len(a) ** (1 / 3))\n # fall back to sqrt(a) bins if iqr is 0\n if h == 0:\n return int(np.sqrt(a.size))\n else:\n return int(np.ceil((a.max() - a.min()) / h))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2395, "name": "reduce", "kind": "ref", "category": "function", "info": " iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2404, "name": "distplot", "kind": "def", "category": "function", "info": "def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,\n hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,\n color=None, vertical=False, norm_hist=False, axlabel=None,\n label=None, ax=None, x=None):\n \"\"\"\n DEPRECATED\n\n This function has been deprecated and will be removed in seaborn v0.14.0.\n It has been replaced by :func:`histplot` and :func:`displot`, two functions\n with a modern API and many more capabilities.\n\n For a guide to updating, please see this notebook:\n\n 
https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n\n \"\"\"\n\n if kde and not hist:\n axes_level_suggestion = (\n \"`kdeplot` (an axes-level function for kernel density plots)\"\n )\n else:\n axes_level_suggestion = (\n \"`histplot` (an axes-level function for histograms)\"\n )\n\n msg = textwrap.dedent(f\"\"\"\n\n `distplot` is a deprecated function and will be removed in seaborn v0.14.0.\n\n Please adapt your code to use either `displot` (a figure-level function with\n similar flexibility) or {axes_level_suggestion}.\n\n For a guide to updating your code to use the new functions, please see\n https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n \"\"\")\n warnings.warn(msg, UserWarning, stacklevel=2)\n\n if ax is None:\n ax = plt.gca()\n\n # Intelligently label the support axis\n label_ax = bool(axlabel)\n if axlabel is None and hasattr(a, \"name\"):\n axlabel = a.name\n if axlabel is not None:\n label_ax = True\n\n # Support new-style API\n if x is not None:\n a = x\n\n # Make a a 1-d float array\n a = np.asarray(a, float)\n if a.ndim > 1:\n a = a.squeeze()\n\n # Drop null values from array\n a = remove_na(a)\n\n # Decide if the hist is normed\n norm_hist = norm_hist or kde or (fit is not None)\n\n # Handle dictionary defaults\n hist_kws = {} if hist_kws is None else hist_kws.copy()\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n rug_kws = {} if rug_kws is None else rug_kws.copy()\n fit_kws = {} if fit_kws is None else fit_kws.copy()\n\n # Get the color from the current color cycle\n if color is None:\n if vertical:\n line, = ax.plot(0, a.mean())\n else:\n line, = ax.plot(a.mean(), 0)\n color = line.get_color()\n line.remove()\n\n # Plug the label into the right kwarg dictionary\n if label is not None:\n if hist:\n hist_kws[\"label\"] = label\n elif kde:\n kde_kws[\"label\"] = label\n elif rug:\n rug_kws[\"label\"] = label\n elif fit:\n fit_kws[\"label\"] = label\n\n if hist:\n if bins is None:\n bins = min(_freedman_diaconis_bins(a), 50)\n hist_kws.setdefault(\"alpha\", 0.4)\n hist_kws.setdefault(\"density\", norm_hist)\n\n orientation = \"horizontal\" if vertical else \"vertical\"\n hist_color = hist_kws.pop(\"color\", color)\n ax.hist(a, bins, orientation=orientation,\n color=hist_color, **hist_kws)\n if hist_color != color:\n hist_kws[\"color\"] = hist_color\n\n axis = \"y\" if vertical else \"x\"\n\n if kde:\n kde_color = kde_kws.pop(\"color\", color)\n kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n if kde_color != color:\n kde_kws[\"color\"] = kde_color\n\n if rug:\n rug_color = rug_kws.pop(\"color\", color)\n rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n if rug_color != color:\n rug_kws[\"color\"] = rug_color\n\n if fit is not None:\n\n def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2462, "name": "remove_na", "kind": "ref", "category": 
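The `distplot` body above warns that the function is deprecated and points at `histplot`/`displot`. A migration sketch matching that message (parameter choices here are illustrative, not the only valid mapping):

```python
# Old call (emits a UserWarning; removed in seaborn v0.14.0):
#   sns.distplot(x)
# New equivalents at the axes and figure level:
import numpy as np
import seaborn as sns

x = np.random.default_rng(1).gamma(2.0, size=300)

ax = sns.histplot(x=x, kde=True, stat="density")            # axes-level
g = sns.displot(x=x, kde=True, stat="density", rug=True)    # figure-level
```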
"function", "info": " a = remove_na(a)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2495, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " bins = min(_freedman_diaconis_bins(a), 50)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2510, "name": "kdeplot", "kind": "ref", "category": "function", "info": " kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2516, "name": "rugplot", "kind": "ref", "category": "function", "info": " rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2522, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2523, "name": "pdf", "kind": "ref", "category": "function", "info": " return fit.pdf(x, *params)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2529, "name": "gaussian_kde", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2529, "name": "scotts_factor", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2530, "name": "_kde_support", "kind": "ref", "category": "function", "info": " x = _kde_support(a, bw, gridsize, cut, clip)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2531, "name": "fit", "kind": "ref", "category": "function", "info": " params = fit.fit(a)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2532, "name": "pdf", "kind": "ref", "category": "function", "info": " y = pdf(x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2541, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(axlabel)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2543, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(axlabel)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 72, "name": "user_cache_dir", "kind": "def", "category": "function", "info": "def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):\n r\"\"\"Return full path to the user-specific cache dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"opinion\" (boolean) can be False to disable the appending of\n \"Cache\" to the base app data dir for Windows. See\n discussion below.\n\n Typical user cache directories are:\n Mac OS X: ~/Library/Caches/\n Unix: ~/.cache/ (XDG default)\n Win XP: C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\\Cache\n Vista: C:\\Users\\\\AppData\\Local\\\\\\Cache\n\n On Windows the only suggestion in the MSDN docs is that local settings go in\n the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming\n app data dir (the default returned by `user_data_dir` above). Apps typically\n put cache data somewhere *under* the given dir here. 
Some examples:\n ...\\Mozilla\\Firefox\\Profiles\\\\Cache\n ...\\Acme\\SuperApp\\Cache\\1.0\n OPINION: This function appends \"Cache\" to the `CSIDL_LOCAL_APPDATA` value.\n This can be disabled with the `opinion=False` option.\n \"\"\"\n if system == \"win32\":\n if appauthor is None:\n appauthor = appname\n path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n if opinion:\n path = os.path.join(path, \"Cache\")\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Caches')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 108, "name": "normpath", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 108, "name": "_get_win_folder", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 117, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('~/Library/Caches')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 121, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 131, "name": "_get_win_folder_from_registry", "kind": "def", "category": "function", "info": "def _get_win_folder_from_registry(csidl_name):\n \"\"\"This is a fallback technique at best. 
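The `user_cache_dir` record above dispatches on platform: Windows uses `CSIDL_LOCAL_APPDATA` (plus an opinionated "Cache" suffix), macOS uses `~/Library/Caches`, and everything else uses `XDG_CACHE_HOME` with a `~/.cache` fallback. A simplified re-creation of that dispatch (the Windows branch is reduced to the `LOCALAPPDATA` environment variable here; the vendored original resolves the CSIDL folder properly):

```python
import os
import sys

def user_cache_dir(appname=None, version=None):
    if sys.platform == "win32":
        path = os.path.normpath(os.environ.get("LOCALAPPDATA", ""))
        if appname:
            path = os.path.join(path, appname, "Cache")
    elif sys.platform == "darwin":
        path = os.path.expanduser("~/Library/Caches")
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path

print(user_cache_dir("seaborn", "0.13"))   # e.g. ~/.cache/seaborn/0.13 on Linux
```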
I'm not sure if using the\n registry for this guarantees us the correct answer for all CSIDL_*\n names.\n \"\"\"\n import winreg as _winreg\n\n shell_folder_name = {\n \"CSIDL_APPDATA\": \"AppData\",\n \"CSIDL_COMMON_APPDATA\": \"Common AppData\",\n \"CSIDL_LOCAL_APPDATA\": \"Local AppData\",\n }[csidl_name]\n\n key = _winreg.OpenKey(\n _winreg.HKEY_CURRENT_USER,\n r\"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\"\n )\n dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n return dir\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 144, "name": "OpenKey", "kind": "ref", "category": "function", "info": " key = _winreg.OpenKey(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 148, "name": "QueryValueEx", "kind": "ref", "category": "function", "info": " dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 152, "name": "_get_win_folder_with_pywin32", "kind": "def", "category": "function", "info": "def _get_win_folder_with_pywin32(csidl_name):\n from win32com.shell import shellcon, shell\n dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n # Try to make this a unicode path because SHGetFolderPath does\n # not return unicode strings when there is unicode data in the\n # path.\n try:\n dir = unicode(dir)\n\n # Downgrade to short path name if have highbit chars. See\n # .\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n try:\n import win32api\n dir = win32api.GetShortPathName(dir)\n except ImportError:\n pass\n except UnicodeError:\n pass\n return dir\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 154, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 159, "name": "unicode", "kind": "ref", "category": "function", "info": " dir = unicode(dir)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 171, "name": "GetShortPathName", "kind": "ref", "category": "function", "info": " dir = win32api.GetShortPathName(dir)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 179, "name": "_get_win_folder_with_ctypes", "kind": "def", "category": "function", "info": "def _get_win_folder_with_ctypes(csidl_name):\n import ctypes\n\n csidl_const = {\n \"CSIDL_APPDATA\": 26,\n \"CSIDL_COMMON_APPDATA\": 35,\n \"CSIDL_LOCAL_APPDATA\": 28,\n }[csidl_name]\n\n buf = ctypes.create_unicode_buffer(1024)\n ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n\n # Downgrade to short path name if have highbit chars. 
See\n # .\n has_high_char = False\n for c in buf:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf2 = ctypes.create_unicode_buffer(1024)\n if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n buf = buf2\n\n return buf.value\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 189, "name": "SHGetFolderPathW", "kind": "ref", "category": "function", "info": " ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 200, "name": "GetShortPathNameW", "kind": "ref", "category": "function", "info": " if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 205, "name": "_get_win_folder_with_jna", "kind": "def", "category": "function", "info": "def _get_win_folder_with_jna(csidl_name):\n import array\n from com.sun import jna\n from com.sun.jna.platform import win32\n\n buf_size = win32.WinDef.MAX_PATH * 2\n buf = array.zeros('c', buf_size)\n shell = win32.Shell32.INSTANCE\n shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n # Downgrade to short path name if have highbit chars. See\n # .\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf = array.zeros('c', buf_size)\n kernel = win32.Kernel32.INSTANCE\n if kernel.GetShortPathName(dir, buf, buf_size):\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n return dir\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 211, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 213, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 214, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 214, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 224, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 226, "name": "GetShortPathName", "kind": "ref", "category": 
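The records above enumerate the vendored appdirs fallback chain for resolving Windows shell folders: registry lookup, pywin32, ctypes, and JNA (for Jython). A Windows-only sketch of the ctypes strategy recorded above, guarded so it fails cleanly elsewhere; the CSIDL constant matches the table in the tag record:

```python
import ctypes
import sys

CSIDL_LOCAL_APPDATA = 28

def get_local_appdata():
    if sys.platform != "win32":
        raise OSError("SHGetFolderPathW is only available on Windows")
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, CSIDL_LOCAL_APPDATA, None, 0, buf)
    # Downgrade to the short (8.3) path if it contains high-bit characters,
    # as the recorded implementation does.
    if any(ord(c) > 255 for c in buf.value):
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            return buf2.value
    return buf.value
```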
"function", "info": " if kernel.GetShortPathName(dir, buf, buf_size):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 227, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 227, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 39, "name": "strip_blank_lines", "kind": "def", "category": "function", "info": "def strip_blank_lines(l):\n \"Remove leading and trailing blank lines from a list of lines\"\n while l and not l[0].strip():\n del l[0]\n while l and not l[-1].strip():\n del l[-1]\n return l\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 48, "name": "Reader", "kind": "def", "category": "class", "info": "__init__\t__getitem__\treset\tread\tseek_next_non_empty_line\teof\tread_to_condition\tread_to_next_empty_line\tread_to_next_unindented_line\tpeek\tis_empty"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 65, "name": "reset", "kind": "ref", "category": "function", "info": " self.reset()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 70, "name": "reset", "kind": "def", "category": "function", "info": " def reset(self):\n self._l = 0 # current line nr\n\n def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 73, "name": "read", "kind": "def", "category": "function", "info": " def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n 
start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 74, "name": "eof", "kind": "ref", "category": "function", "info": " if not self.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 81, "name": "seek_next_non_empty_line", "kind": "def", "category": "function", "info": " def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 88, "name": "eof", "kind": "def", "category": "function", "info": " def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 91, "name": "read_to_condition", "kind": "def", "category": "function", "info": " def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return 
self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 94, "name": "condition_func", "kind": "ref", "category": "function", "info": " if condition_func(line):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 97, "name": "eof", "kind": "ref", "category": "function", "info": " if self.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 101, "name": "read_to_next_empty_line", "kind": "def", "category": "function", "info": " def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 102, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self.seek_next_non_empty_line()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 104, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 107, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_empty)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 109, "name": "read_to_next_unindented_line", "kind": "def", "category": "function", "info": " def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n 
return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 110, "name": "is_unindented", "kind": "def", "category": "function", "info": " def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 112, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_unindented)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 114, "name": "peek", "kind": "def", "category": "function", "info": " def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 120, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 124, "name": "ParseError", "kind": "def", "category": "class", "info": "__str__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 135, "name": "NumpyDocString", "kind": "def", "category": "class", "info": "__init__\t__getitem__\t__setitem__\t__iter__\t__len__\t_is_at_section\t_strip\t_read_to_next_section\t_read_sections\t_parse_param_list\t_parse_see_also\t_parse_index\t_parse_summary\t_parse\t_error_location\t_str_header\t_str_indent\t_str_signature\t_str_summary\t_str_extended_summary\t_str_param_list\t_str_section\t_str_see_also\t_str_index\t__str__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 167, "name": "Reader", "kind": "ref", "category": "function", "info": " self._doc = Reader(docstring)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 171, "name": "_parse", "kind": "ref", "category": "function", "info": " self._parse()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 181, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"Unknown section {key}\", error=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 191, "name": "_is_at_section", "kind": "def", "category": "function", "info": " def _is_at_section(self):\n 
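The `Reader` records above describe a simple line cursor: `read`/`peek` with `read_to_condition` driving `read_to_next_empty_line` and `read_to_next_unindented_line`. A minimal usage sketch, assuming the vendored import path `seaborn.external.docscrape`:

```python
from seaborn.external.docscrape import Reader

lines = ["Summary line", "", "Parameters", "----------", "x : int"]
r = Reader(lines)
print(r.read_to_next_empty_line())   # ['Summary line']
print(r.peek())                      # '' (cursor now sits on the blank line)
r.seek_next_non_empty_line()
print(r.read())                      # 'Parameters'
```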
self._doc.seek_next_non_empty_line()\n\n if self._doc.eof():\n return False\n\n l1 = self._doc.peek().strip() # e.g. Parameters\n\n if l1.startswith('.. index::'):\n return True\n\n l2 = self._doc.peek(1).strip() # ---------- or ==========\n return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))\n\n def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # \n # SPACE* COLON SPACE+ SPACE*\n # ( COMMA SPACE+ )+ (COMMA | PERIOD)? SPACE*\n # ( COMMA SPACE+ )* SPACE* COLON SPACE+ SPACE*\n\n # is one of\n # \n # COLON COLON BACKTICK BACKTICK\n # where\n # is a legal function name, and\n # is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # is a string describing the function.\n\n _role = r\":(?P\\w+):\"\n _funcbacktick = r\"`(?P(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P\\s*:(\\s+(?P\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P\" + # group for all function names\n _funcname +\n r\"(?P([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if 
line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
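The `_parse_see_also` body above matches lines of comma-separated function names with an optional ` : description` tail via a compiled regex (whose named groups were mangled by extraction in this dump). A hedged mini-version of that line grammar, simplified and not the vendored pattern:

```python
import re

line_rgx = re.compile(
    r"^\s*(?P<allfuncs>[A-Za-z0-9_.`:~-]+(?:,\s+[A-Za-z0-9_.`:~-]+)*)"
    r"(?:\s*:\s+(?P<desc>\S.*))?\s*$"
)
m = line_rgx.match("histplot, kdeplot : Newer distribution functions")
print(m.group("allfuncs"))   # 'histplot, kdeplot'
print(m.group("desc"))       # 'Newer distribution functions'
```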
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 192, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self._doc.seek_next_non_empty_line()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 194, "name": "eof", "kind": "ref", "category": "function", "info": " if self._doc.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 197, "name": "peek", "kind": "ref", "category": "function", "info": " l1 = self._doc.peek().strip() # e.g. Parameters\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 202, "name": "peek", "kind": "ref", "category": "function", "info": " l2 = self._doc.peek(1).strip() # ---------- or ==========\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 205, "name": "_strip", "kind": "def", "category": "function", "info": " def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # \n # SPACE* COLON SPACE+ SPACE*\n # ( COMMA SPACE+ )+ 
(COMMA | PERIOD)? SPACE*\n # ( COMMA SPACE+ )* SPACE* COLON SPACE+ SPACE*\n\n # is one of\n # \n # COLON COLON BACKTICK BACKTICK\n # where\n # is a legal function name, and\n # is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # is a string describing the function.\n\n _role = r\":(?P\\w+):\"\n _funcbacktick = r\"`(?P(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P\\s*:(\\s+(?P\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P\" + # group for all function names\n _funcname +\n r\"(?P([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 218, "name": "_read_to_next_section", "kind": "def", "category": "function", "info": " def _read_to_next_section(self):\n ..."}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 219, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " section = self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 221, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " while not self._is_at_section() and not self._doc.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 221, "name": "eof", "kind": "ref", "category": "function", "info": " while not self._is_at_section() and not self._doc.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 222, "name": "peek", "kind": "ref", "category": "function", "info": " if not self._doc.peek(-1).strip(): # previous line was empty\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 225, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " section += self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 229, "name": "_read_sections", "kind": "def", "category": "function", "info": " def _read_sections(self):\n ..."}
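The records above (lines 192-229 of docscrape.py) all belong to the section reader: `peek()`/`peek(1)` look for a header line followed by a `----------`/`==========` underline, and `_read_to_next_section` accumulates lines until the next such header. A minimal standalone sketch of that detection, assuming only the stdlib (`split_sections` is a hypothetical helper, not the vendored `Reader` API):

```python
import re

UNDERLINE = re.compile(r"^\s*(-{3,}|={3,})\s*$")

def split_sections(docstring):
    """Split a numpydoc-style docstring into (header, body_lines) pairs.

    A header is any line whose following line is a dashed/equals underline,
    mirroring how docscrape peeks two lines ahead in _is_at_section().
    """
    lines = docstring.splitlines()
    sections, header, body = [], "", []
    i = 0
    while i < len(lines):
        if i + 1 < len(lines) and UNDERLINE.match(lines[i + 1]):
            sections.append((header, body))   # flush the previous section
            header, body = lines[i].strip(), []
            i += 2  # skip the underline itself
        else:
            body.append(lines[i])
            i += 1
    sections.append((header, body))
    return sections

doc = """Summary line.

Parameters
----------
x : int
    The input.

Returns
-------
int
"""
for name, body in split_sections(doc):
    print(repr(name), len(body), "body lines")
```

The first pair carries an empty header: it is the summary block before any underlined section, which the real parser routes through `_parse_summary` instead.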
, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 230, "name": "eof", "kind": "ref", "category": "function", "info": " while not self._doc.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 231, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " data = self._read_to_next_section()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 239, "name": "_strip", "kind": "ref", "category": "function", "info": " yield name, self._strip(data[2:])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 241, "name": "_parse_param_list", "kind": "def", "category": "function", "info": " def _parse_param_list(self, content, single_element_is_type=False):\n ..."}
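`_parse_param_list` (line 241) splits each `name : type` header on the first `' : '` and gathers the indented lines beneath it as the description; with `single_element_is_type=True` a bare entry is treated as a type, which is how Returns/Yields/Raises entries are handled. A rough standalone equivalent, assuming a plain list of lines (`parse_params` and this `Parameter` tuple are illustrative, not the vendored API):

```python
from collections import namedtuple
from textwrap import dedent

Parameter = namedtuple("Parameter", ["name", "type", "desc"])

def parse_params(lines):
    """Parse 'name : type' headers followed by indented descriptions."""
    params, i = [], 0
    while i < len(lines):
        header = lines[i].strip()
        i += 1
        if not header:
            continue
        name, _, typ = header.partition(" : ")  # '' type when no separator
        desc = []
        # consume blank or indented lines as the description body
        while i < len(lines) and (not lines[i].strip() or lines[i].startswith(" ")):
            desc.append(lines[i])
            i += 1
        desc = dedent("\n".join(desc)).strip().splitlines()
        params.append(Parameter(name, typ, desc))
    return params

print(parse_params([
    "x : int",
    "    The input value.",
    "flag : bool, optional",
    "    Whether to frobnicate.",
]))
```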
, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 242, "name": "Reader", "kind": "ref", "category": "function", "info": " r = Reader(content)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 244, "name": "eof", "kind": "ref", "category": "function", "info": " while not r.eof():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 245, "name": "read", "kind": "ref", "category": "function", "info": " header = r.read().strip()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 254, "name": "read_to_next_unindented_line", "kind": "ref", "category": "function", "info": " desc = r.read_to_next_unindented_line()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 255, "name": "dedent_lines", "kind": "ref", "category": "function", "info": " desc = dedent_lines(desc)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 256, "name": "strip_blank_lines", "kind": "ref", "category": "function", "info": " desc = strip_blank_lines(desc)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 298, "name": "_parse_see_also", "kind": "def", "category": "function", "info": " def _parse_see_also(self, content):\n ..."}
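`_parse_see_also` (line 298) drives everything off `_line_rgx`, whose named groups were restored above (`allfuncs`, `morefuncs`, `trailing`, `description`, `desc`). A simplified re-creation of that grammar, with fewer corner cases than the vendored pattern, shows what the groups capture:

```python
import re

role = r":(?P<role>\w+):"
backtick = r"`(?P<name>(?:~\w+\.)?[\w.-]+)`"
plain = r"(?P<name2>[\w.-]+)"
funcname = f"({role}{backtick}|{plain})"
# repeated copies of the groups need distinct names; same trick as docscrape
funcname_next = funcname.replace("role", "rolenext").replace("name", "namenext")
line_rgx = re.compile(
    rf"^\s*(?P<allfuncs>{funcname}(?P<morefuncs>(,\s+{funcname_next})*))"
    r"(?P<trailing>[,.])?"
    r"(?P<description>\s*:(\s+(?P<desc>\S+.*))?)?\s*$"
)

m = line_rgx.match("func_a, :meth:`func_b` : does something useful")
print(m.group("allfuncs"))  # func_a, :meth:`func_b`
print(m.group("desc"))      # does something useful
```

A populated `trailing` group alongside a description is what triggers the soft "Unexpected comma or period after function list" warning seen in the parser.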
, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 309, "name": "parse_item_name", "kind": "def", "category": "function", "info": " def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n ..."}
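`parse_item_name` (line 309) is the inner tokenizer: it pulls one `:role:`name`` or bare `name` off the front of the text and returns how many characters it consumed, so the caller can loop over a comma-separated list. A compact illustration under the same contract (this standalone `parse_item_name` is a sketch, not the closure defined inside `_parse_see_also`):

```python
import re

func_rgx = re.compile(
    r"^\s*(:(?P<role>\w+):`(?P<name>(?:~\w+\.)?[\w.-]+)`|(?P<name2>[\w.-]+))\s*"
)

def parse_item_name(text):
    """Return (name, role, chars_consumed) for the leading item."""
    m = func_rgx.match(text)
    if not m:
        raise ValueError(f"{text!r} is not an item name")
    role = m.group("role")
    name = m.group("name") if role else m.group("name2")
    return name, role, m.end()

text = ":obj:`~baz.obj_r`, func_f1"
while text:
    name, role, end = parse_item_name(text)
    print(name, role)
    text = text[end:].lstrip(", ")  # step past the separator, as the caller does
```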
, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 313, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{text} is not a item name\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 314, "name": "group", "kind": "ref", "category": "function", "info": " role = m.group('role')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 315, "name": "group", "kind": "ref", "category": "function", "info": " name = m.group('name') if role else m.group('name2')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 315, "name": "group", "kind": "ref", "category": "function", "info": " name = m.group('name') if role else m.group('name2')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 316, "name": "end", "kind": "ref", "category": "function", "info": " return name, role, m.end()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 326, "name": "group", "kind": "ref", "category": "function", "info": " description = line_match.group('desc')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 327, "name": "group", "kind": "ref", "category": "function", "info": " if line_match.group('trailing') and description:\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 328, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 330, "name": "end", "kind": "ref", "category": "function", "info": " 'line \"%s\"' % (line_match.end('trailing'), line),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 336, "name": "group", "kind": "ref", "category": "function", "info": " text = line_match.group('allfuncs')\n"}
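The `_error_location` reference above (line 328) shows the parser's two failure modes: a hard `ValueError` for malformed docstrings, and a soft warning for recoverable issues such as a trailing comma, with the offending object's source file appended via `inspect.getsourcefile`. A sketch of that pattern (this standalone `error_location` takes `obj` as an argument where the vendored method reads `self._obj`):

```python
import inspect
import warnings

def error_location(msg, obj=None, error=True):
    """Raise or warn with the docstring's source location appended."""
    if obj is not None:
        try:
            filename = inspect.getsourcefile(obj)
        except TypeError:  # e.g. built-ins have no source file
            filename = None
        msg += f" in the docstring of {obj} in {filename}."
    if error:
        raise ValueError(msg)
    warnings.warn(msg)

# soft mode: emits a UserWarning instead of raising
error_location("Unexpected comma after function list", obj=inspect, error=False)
```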
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 340, "name": "parse_item_name", "kind": "ref", "category": "function", "info": " name, role, match_end = parse_item_name(text)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 348, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{line} is not a item name\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 351, "name": "_parse_index", "kind": "def", "category": "function", "info": " def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 357, "name": "strip_each_in", "kind": "def", "category": "function", "info": " def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 363, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out['default'] = strip_each_in(section[1].split(','))[0]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 367, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out[line[1]] = strip_each_in(line[2].split(','))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 370, "name": "_parse_summary", "kind": "def", "category": "function", "info": " def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 372, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if self._is_at_section():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 377, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " summary = self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 382, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 389, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 390, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " self['Extended Summary'] = self._read_to_next_section()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 392, "name": "_parse", "kind": "def", "category": "function", "info": " def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. 
Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 393, "name": "reset", "kind": "ref", "category": "function", "info": " self._doc.reset()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 394, "name": "_parse_summary", "kind": "ref", "category": "function", "info": " self._parse_summary()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 396, "name": "_read_sections", "kind": "ref", "category": "function", "info": " sections = list(self._read_sections())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 414, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"The section {section} appears twice\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 418, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(content)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 420, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 423, "name": "_parse_index", "kind": "ref", "category": "function", "info": " self['index'] = self._parse_index(section, content)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 425, "name": "_parse_see_also", "kind": "ref", "category": "function", "info": " self['See Also'] = self._parse_see_also(content)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 429, "name": "_error_location", "kind": "def", "category": "function", "info": " def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} 
in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 444, "name": "_str_header", "kind": "def", "category": "function", "info": " def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 447, "name": "_str_indent", "kind": "def", "category": "function", "info": " def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 453, "name": "_str_signature", "kind": "def", "category": "function", "info": " def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 459, "name": "_str_summary", "kind": "def", "category": "function", "info": " def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 465, "name": "_str_extended_summary", "kind": "def", "category": "function", "info": " def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 471, "name": "_str_param_list", "kind": "def", "category": "function", "info": " def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 474, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 483, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent(param.desc)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 487, "name": "_str_section", "kind": "def", "category": "function", "info": " def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 490, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 495, "name": "_str_see_also", "kind": "def", "category": "function", "info": " def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 499, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(\"See Also\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 516, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([' '.join(desc)])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 520, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([self.empty_description])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 527, "name": "_str_index", "kind": "def", "category": "function", "info": " def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 547, "name": "_str_signature", "kind": "ref", "category": "function", "info": " out += self._str_signature()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 548, "name": "_str_summary", "kind": "ref", "category": "function", "info": " out += self._str_summary()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 549, "name": "_str_extended_summary", "kind": "ref", "category": "function", "info": " out += self._str_extended_summary()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 552, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 553, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section('Warnings')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 554, "name": "_str_see_also", "kind": "ref", "category": "function", "info": " out += self._str_see_also(func_role)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 556, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section(s)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 558, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 559, "name": "_str_index", "kind": "ref", "category": "function", "info": " out += self._str_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 571, "name": "dedent_lines", "kind": "def", "category": "function", "info": "def dedent_lines(lines):\n \"\"\"Deindent a list 
of lines maximally\"\"\"\n return textwrap.dedent(\"\\n\".join(lines)).split(\"\\n\")\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 576, "name": "header", "kind": "def", "category": "function", "info": "def header(text, style='-'):\n return text + '\\n' + style*len(text) + '\\n'\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 580, "name": "FunctionDoc", "kind": "def", "category": "class", "info": "__init__\tget_func\t__str__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 592, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 608, "name": "get_func", "kind": "def", "category": "function", "info": " def get_func(self):\n func_name = getattr(self._f, '__name__', self.__class__.__name__)\n if inspect.isclass(self._f):\n func = getattr(self._f, '__call__', self._f.__init__)\n else:\n func = self._f\n return func, func_name\n\n def __str__(self):\n out = ''\n\n func, func_name = self.get_func()\n\n roles = {'func': 'function',\n 'meth': 'method'}\n\n if self._role:\n if self._role not in roles:\n print(f\"Warning: invalid role {self._role}\")\n out += f\".. {roles.get(self._role, '')}:: {func_name}\\n \\n\\n\"\n\n out += super().__str__(func_role=self._role)\n return out\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 619, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 633, "name": "ClassDoc", "kind": "def", "category": "class", "info": "__init__\tmethods\tproperties\t_is_show_member"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 668, "name": "splitlines_x", "kind": "def", "category": "function", "info": " def splitlines_x(s):\n if not s:\n return []\n else:\n return s.splitlines()\n for field, items in [('Methods', self.methods),\n ('Attributes', self.properties)]:\n if not self[field]:\n doc_list = []\n for name in sorted(items):\n if (name in _exclude or\n (_members and name not in _members)):\n continue\n try:\n doc_item = pydoc.getdoc(getattr(self._cls, name))\n doc_list.append(\n Parameter(name, '', splitlines_x(doc_item)))\n except AttributeError:\n pass # method doesn't exist\n self[field] = doc_list\n\n @property\n def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n 
inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 684, "name": "splitlines_x", "kind": "ref", "category": "function", "info": " Parameter(name, '', splitlines_x(doc_item)))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 690, "name": "methods", "kind": "def", "category": "function", "info": " def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 697, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 700, "name": "properties", "kind": "def", "category": "function", "info": " def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 707, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 709, "name": "_is_show_member", "kind": "def", "category": "function", "info": " def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 30, "name": "husl_to_rgb", "kind": "def", "category": "function", "info": "def husl_to_rgb(h, s, 
l):\n return lch_to_rgb(*husl_to_lch([h, s, l]))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "lch_to_rgb", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "husl_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 34, "name": "husl_to_hex", "kind": "def", "category": "function", "info": "def husl_to_hex(h, s, l):\n return rgb_to_hex(husl_to_rgb(h, s, l))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 38, "name": "rgb_to_husl", "kind": "def", "category": "function", "info": "def rgb_to_husl(r, g, b):\n return lch_to_husl(rgb_to_lch(r, g, b))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "lch_to_husl", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 42, "name": "hex_to_husl", "kind": "def", "category": "function", "info": "def hex_to_husl(hex):\n return rgb_to_husl(*hex_to_rgb(hex))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 46, "name": "huslp_to_rgb", "kind": "def", "category": "function", "info": "def huslp_to_rgb(h, s, l):\n return lch_to_rgb(*huslp_to_lch([h, s, l]))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "lch_to_rgb", "kind": "ref", "category": "function", 
"info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "huslp_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 50, "name": "huslp_to_hex", "kind": "def", "category": "function", "info": "def huslp_to_hex(h, s, l):\n return rgb_to_hex(huslp_to_rgb(h, s, l))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "huslp_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 54, "name": "rgb_to_huslp", "kind": "def", "category": "function", "info": "def rgb_to_huslp(r, g, b):\n return lch_to_huslp(rgb_to_lch(r, g, b))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "lch_to_huslp", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 58, "name": "hex_to_huslp", "kind": "def", "category": "function", "info": "def hex_to_huslp(hex):\n return rgb_to_huslp(*hex_to_rgb(hex))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "rgb_to_huslp", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 62, "name": "lch_to_rgb", "kind": "def", "category": "function", "info": "def lch_to_rgb(l, c, h):\n return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "xyz_to_rgb", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "luv_to_xyz", 
"kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "lch_to_luv", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 66, "name": "rgb_to_lch", "kind": "def", "category": "function", "info": "def rgb_to_lch(r, g, b):\n return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "luv_to_lch", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "xyz_to_luv", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "rgb_to_xyz", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 70, "name": "max_chroma", "kind": "def", "category": "function", "info": "def max_chroma(L, H):\n hrad = math.radians(H)\n sinH = (math.sin(hrad))\n cosH = (math.cos(hrad))\n sub1 = (math.pow(L + 16, 3.0) / 1560896.0)\n sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)\n result = float(\"inf\")\n for row in m:\n m1 = row[0]\n m2 = row[1]\n m3 = row[2]\n top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)\n rbottom = (0.86330 * m3 - 0.17266 * m2)\n lbottom = (0.12949 * m3 - 0.38848 * m1)\n bottom = (rbottom * sinH + lbottom * cosH) * sub2\n\n for t in (0.0, 1.0):\n C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))\n if C > 0.0 and C < result:\n result = C\n return result\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 93, "name": "_hrad_extremum", "kind": "def", "category": "function", "info": "def _hrad_extremum(L):\n lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0\n rhs = 1107.0 / 125000.0\n sub = lhs if lhs > rhs else 10.0 * L / 9033.0\n chroma = float(\"inf\")\n result = None\n for row in m:\n for limit in (0.0, 1.0):\n [m1, m2, m3] = row\n top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit\n bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub\n hrad = math.atan2(top, bottom)\n # This is a math hack to deal with tan quadrants, I'm too lazy to figure\n # out how to do this properly\n if limit == 0.0:\n hrad += math.pi\n test = max_chroma(L, math.degrees(hrad))\n if test < chroma:\n chroma = test\n result = hrad\n return result\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 109, "name": "max_chroma", "kind": "ref", "category": "function", "info": " test = max_chroma(L, 
math.degrees(hrad))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 116, "name": "max_chroma_pastel", "kind": "def", "category": "function", "info": "def max_chroma_pastel(L):\n H = math.degrees(_hrad_extremum(L))\n return max_chroma(L, H)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 117, "name": "_hrad_extremum", "kind": "ref", "category": "function", "info": " H = math.degrees(_hrad_extremum(L))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 118, "name": "max_chroma", "kind": "ref", "category": "function", "info": " return max_chroma(L, H)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 121, "name": "dot_product", "kind": "def", "category": "function", "info": "def dot_product(a, b):\n return sum(map(operator.mul, a, b))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 125, "name": "f", "kind": "def", "category": "function", "info": "def f(t):\n if t > lab_e:\n return (math.pow(t, 1.0 / 3.0))\n else:\n return (7.787 * t + 16.0 / 116.0)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 132, "name": "f_inv", "kind": "def", "category": "function", "info": "def f_inv(t):\n if math.pow(t, 3.0) > lab_e:\n return (math.pow(t, 3.0))\n else:\n return (116.0 * t - 16.0) / lab_k\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 139, "name": "from_linear", "kind": "def", "category": "function", "info": "def from_linear(c):\n if c <= 0.0031308:\n return 12.92 * c\n else:\n return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 146, "name": "to_linear", "kind": "def", "category": "function", "info": "def to_linear(c):\n a = 0.055\n\n if c > 0.04045:\n return (math.pow((c + a) / (1.0 + a), 2.4))\n else:\n return (c / 12.92)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 155, "name": "rgb_prepare", "kind": "def", "category": "function", "info": "def rgb_prepare(triple):\n ret = []\n for ch in triple:\n ch = round(ch, 3)\n\n if ch < -0.0001 or ch > 1.0001:\n raise Exception(f\"Illegal RGB value {ch:f}\")\n\n if ch < 0:\n ch = 0\n if ch > 1:\n ch = 1\n\n # Fix for Python 3 which by default rounds 4.5 down to 4.0\n # instead of Python 2 which is rounded to 5.0 which caused\n # a couple off by one errors in the tests. 
Tests now all pass\n # in Python 2 and Python 3\n ret.append(int(round(ch * 255 + 0.001, 0)))\n\n return ret\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 177, "name": "hex_to_rgb", "kind": "def", "category": "function", "info": "def hex_to_rgb(hex):\n if hex.startswith('#'):\n hex = hex[1:]\n r = int(hex[0:2], 16) / 255.0\n g = int(hex[2:4], 16) / 255.0\n b = int(hex[4:6], 16) / 255.0\n return [r, g, b]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 186, "name": "rgb_to_hex", "kind": "def", "category": "function", "info": "def rgb_to_hex(triple):\n [r, g, b] = triple\n return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 188, "name": "rgb_prepare", "kind": "ref", "category": "function", "info": " return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 191, "name": "xyz_to_rgb", "kind": "def", "category": "function", "info": "def xyz_to_rgb(triple):\n xyz = map(lambda row: dot_product(row, triple), m)\n return list(map(from_linear, xyz))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 192, "name": "dot_product", "kind": "ref", "category": "function", "info": " xyz = map(lambda row: dot_product(row, triple), m)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 196, "name": "rgb_to_xyz", "kind": "def", "category": "function", "info": "def rgb_to_xyz(triple):\n rgbl = list(map(to_linear, triple))\n return list(map(lambda row: dot_product(row, rgbl), m_inv))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 198, "name": "dot_product", "kind": "ref", "category": "function", "info": " return list(map(lambda row: dot_product(row, rgbl), m_inv))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 201, "name": "xyz_to_luv", "kind": "def", "category": "function", "info": "def xyz_to_luv(triple):\n X, Y, Z = triple\n\n if X == Y == Z == 0.0:\n return [0.0, 0.0, 0.0]\n\n varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))\n varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))\n L = 116.0 * f(Y / refY) - 16.0\n\n # Black will create a divide-by-zero error\n if L == 0.0:\n return [0.0, 0.0, 0.0]\n\n U = 13.0 * L * (varU - refU)\n V = 13.0 * L * (varV - refV)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 209, "name": "f", "kind": "ref", "category": "function", "info": " L = 116.0 * f(Y / refY) - 16.0\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 221, "name": "luv_to_xyz", "kind": "def", "category": "function", "info": "def luv_to_xyz(triple):\n L, U, V = triple\n\n if L == 0:\n return [0.0, 
0.0, 0.0]\n\n varY = f_inv((L + 16.0) / 116.0)\n varU = U / (13.0 * L) + refU\n varV = V / (13.0 * L) + refV\n Y = varY * refY\n X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)\n Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)\n\n return [X, Y, Z]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 227, "name": "f_inv", "kind": "ref", "category": "function", "info": " varY = f_inv((L + 16.0) / 116.0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 237, "name": "luv_to_lch", "kind": "def", "category": "function", "info": "def luv_to_lch(triple):\n L, U, V = triple\n\n C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))\n hrad = (math.atan2(V, U))\n H = math.degrees(hrad)\n if H < 0.0:\n H = 360.0 + H\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 249, "name": "lch_to_luv", "kind": "def", "category": "function", "info": "def lch_to_luv(triple):\n L, C, H = triple\n\n Hrad = math.radians(H)\n U = (math.cos(Hrad) * C)\n V = (math.sin(Hrad) * C)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 259, "name": "husl_to_lch", "kind": "def", "category": "function", "info": "def husl_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma(L, H)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 267, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 273, "name": "lch_to_husl", "kind": "def", "category": "function", "info": "def lch_to_husl(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma(L, H)\n S = C / mx * 100.0\n\n return [H, S, L]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 281, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 287, "name": "huslp_to_lch", "kind": "def", "category": "function", "info": "def huslp_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma_pastel(L)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 295, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 301, "name": 
"lch_to_huslp", "kind": "def", "category": "function", "info": "def lch_to_huslp(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma_pastel(L)\n S = C / mx * 100.0\n\n return [H, S, L]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 309, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 80, "name": "gaussian_kde", "kind": "def", "category": "class", "info": "__init__\tevaluate\tscotts_factor\tsilverman_factor\tset_bandwidth\t_compute_covariance\tpdf\tweights\tneff"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 201, "name": "astype", "kind": "ref", "category": "function", "info": " self._weights = atleast_1d(weights).astype(float)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 209, "name": "set_bandwidth", "kind": "ref", "category": "function", "info": " self.set_bandwidth(bw_method=bw_method)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 211, "name": "evaluate", "kind": "def", "category": "function", "info": " def evaluate(self, points):\n \"\"\"Evaluate the estimated pdf on a set of points.\n\n Parameters\n ----------\n points : (# of dimensions, # of points)-array\n Alternatively, a (# of dimensions,) vector can be passed in and\n treated as a single point.\n\n Returns\n -------\n values : (# of points,)-array\n The values at each point.\n\n Raises\n ------\n ValueError : if the dimensionality of the input points is different than\n the dimensionality of the KDE.\n\n \"\"\"\n points = atleast_2d(asarray(points))\n\n d, m = points.shape\n if d != self.d:\n if d == 1 and m == self.d:\n # points was passed in as a row vector\n points = reshape(points, (self.d, 1))\n m = 1\n else:\n msg = f\"points have dimension {d}, dataset has dimension {self.d}\"\n raise ValueError(msg)\n\n output_dtype = np.common_type(self.covariance, points)\n result = zeros((m,), dtype=output_dtype)\n\n whitening = linalg.cholesky(self.inv_cov)\n scaled_dataset = dot(whitening, self.dataset)\n scaled_points = dot(whitening, points)\n\n if m >= self.n:\n # there are more points than data, so loop over data\n for i in range(self.n):\n diff = scaled_dataset[:, i, newaxis] - scaled_points\n energy = sum(diff * diff, axis=0) / 2.0\n result += self.weights[i]*exp(-energy)\n else:\n # loop over points\n for i in range(m):\n diff = scaled_dataset - scaled_points[:, i, newaxis]\n energy = sum(diff * diff, axis=0) / 2.0\n result[i] = sum(exp(-energy)*self.weights, axis=0)\n\n result = result / self._norm_factor\n\n return result\n\n __call__ = evaluate\n\n def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be 
overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 269, "name": "scotts_factor", "kind": "def", "category": "function", "info": " def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 279, "name": "silverman_factor", "kind": "def", "category": "function", "info": " def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 297, "name": "set_bandwidth", "kind": "def", "category": "function", "info": " def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 329, "name": "_bw_method", "kind": "ref", "category": "function", "info": " self.covariance_factor = lambda: self._bw_method(self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 335, "name": "_compute_covariance", "kind": "ref", "category": "function", "info": " self._compute_covariance()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 337, "name": "_compute_covariance", "kind": "def", "category": "function", "info": " def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 341, "name": "covariance_factor", "kind": "ref", "category": "function", "info": " self.factor = self.covariance_factor()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 353, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 363, "name": "evaluate", "kind": "ref", "category": "function", "info": " return self.evaluate(x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 366, "name": "weights", "kind": "def", "category": "function", "info": " def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 374, "name": "neff", "kind": "def", "category": "function", "info": " def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 33, "name": "InfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 58, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> \"NegativeInfinityType\":\n return NegativeInfinity\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 62, "name": "InfinityType", "kind": "ref", "category": "function", "info": "Infinity = InfinityType()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 65, "name": "NegativeInfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 90, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> InfinityType:\n return Infinity\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 94, "name": "NegativeInfinityType", "kind": "ref", "category": "function", "info": "NegativeInfinity = NegativeInfinityType()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 127, "name": "InvalidVersion", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": 
"seaborn/external/version.py", "line": 133, "name": "_BaseVersion", "kind": "def", "category": "class", "info": "__hash__\t__lt__\t__le__\t__eq__\t__ge__\t__gt__\t__ne__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 213, "name": "Version", "kind": "def", "category": "class", "info": "__init__\t__repr__\t__str__\tepoch\trelease\tpre\tpost\tdev\tlocal\tpublic\tbase_version\tis_prerelease\tis_postrelease\tis_devrelease\tmajor\tminor\tmicro"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 222, "name": "InvalidVersion", "kind": "ref", "category": "function", "info": " raise InvalidVersion(f\"Invalid version: '{version}'\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 225, "name": "_Version", "kind": "ref", "category": "function", "info": " self._version = _Version(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 227, "name": "group", "kind": "ref", "category": "function", "info": " release=tuple(int(i) for i in match.group(\"release\").split(\".\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 229, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " post=_parse_letter_version(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "_parse_local_version", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "group", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 237, "name": "_cmpkey", "kind": "ref", "category": "function", "info": " self._key = _cmpkey(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 278, "name": "epoch", "kind": "def", "category": "function", "info": " def epoch(self) -> int:\n _epoch: int = self._version.epoch\n return _epoch\n\n @property\n def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] 
= self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 283, "name": "release", "kind": "def", "category": "function", "info": " def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] = self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 288, "name": "pre", "kind": "def", "category": "function", "info": " def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if 
self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 293, "name": "post", "kind": "def", "category": "function", "info": " def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 297, "name": "dev", "kind": "def", "category": "function", "info": " def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def 
is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 301, "name": "local", "kind": "def", "category": "function", "info": " def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 308, "name": "public", "kind": "def", "category": "function", "info": " def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 312, "name": "base_version", "kind": "def", "category": "function", "info": " def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if 
len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 325, "name": "is_prerelease", "kind": "def", "category": "function", "info": " def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 329, "name": "is_postrelease", "kind": "def", "category": "function", "info": " def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 333, "name": "is_devrelease", "kind": "def", "category": "function", "info": " def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 337, "name": "major", "kind": "def", "category": "function", "info": " def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 341, "name": "minor", "kind": "def", "category": "function", "info": " def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 345, "name": "micro", "kind": "def", "category": "function", "info": " def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 349, "name": "_parse_letter_version", "kind": "def", "category": "function", "info": "def _parse_letter_version(\n letter: str, number: Union[str, bytes, SupportsInt]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 388, "name": "_parse_local_version", "kind": "def", "category": "function", "info": "def _parse_local_version(local: str) -> Optional[LocalType]:\n \"\"\"\n Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").\n \"\"\"\n if local is not None:\n return tuple(\n part.lower() if not part.isdigit() else int(part)\n for part in _local_version_separators.split(local)\n )\n return None\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 400, "name": "_cmpkey", "kind": "def", "category": "function", "info": "def _cmpkey(\n epoch: int,\n release: Tuple[int, ...],\n pre: Optional[Tuple[str, int]],\n post: Optional[Tuple[str, int]],\n dev: Optional[Tuple[str, int]],\n local: Optional[Tuple[SubLocalType]],\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 30, "name": "_index_to_label", "kind": "def", "category": "function", "info": "def _index_to_label(index):\n \"\"\"Convert a pandas index or multiindex to an axis label.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return \"-\".join(map(to_utf8, index.names))\n else:\n return index.name\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 38, "name": "_index_to_ticklabels", "kind": "def", "category": "function", "info": "def _index_to_ticklabels(index):\n \"\"\"Convert a pandas index or multiindex into ticklabels.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return [\"-\".join(map(to_utf8, i)) for i in index.values]\n else:\n return index.values\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 46, "name": "_convert_colors", "kind": "def", "category": "function", "info": "def _convert_colors(colors):\n \"\"\"Convert either a list of colors or nested lists of colors to RGB.\"\"\"\n to_rgb = mpl.colors.to_rgb\n\n try:\n to_rgb(colors[0])\n # If this works, there is only one level of colors\n return list(map(to_rgb, colors))\n except ValueError:\n # If we get here, we have nested lists\n return [list(map(to_rgb, l)) for l in colors]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 51, "name": "to_rgb", "kind": "ref", "category": "function", "info": " to_rgb(colors[0])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 59, "name": "_matrix_mask", "kind": "def", "category": "function", "info": "def _matrix_mask(data, mask):\n \"\"\"Ensure that data and mask are compatible and add missing values.\n\n Values will be plotted for cells where ``mask`` is ``False``.\n\n ``data`` is expected to be a DataFrame; ``mask`` can be an array or\n a DataFrame.\n\n \"\"\"\n if mask is None:\n mask = np.zeros(data.shape, bool)\n\n if isinstance(mask, np.ndarray):\n # 
For array masks, ensure that shape matches data then convert\n if mask.shape != data.shape:\n raise ValueError(\"Mask must have the same shape as data.\")\n\n mask = pd.DataFrame(mask,\n index=data.index,\n columns=data.columns,\n dtype=bool)\n\n elif isinstance(mask, pd.DataFrame):\n # For DataFrame masks, ensure that semantic labels match data\n if not mask.index.equals(data.index) \\\n and mask.columns.equals(data.columns):\n err = \"Mask must have the same index and columns as data.\"\n raise ValueError(err)\n\n # Add any cells with missing data to the mask\n # This works around an issue where `plt.pcolormesh` doesn't represent\n # missing data properly\n mask = mask | pd.isnull(data)\n\n return mask\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 83, "name": "equals", "kind": "ref", "category": "function", "info": " if not mask.index.equals(data.index) \\\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 84, "name": "equals", "kind": "ref", "category": "function", "info": " and mask.columns.equals(data.columns):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 96, "name": "_HeatMapper", "kind": "def", "category": "class", "info": "__init__\t_determine_cmap_params\t_annotate_heatmap\t_skip_ticks\t_auto_ticks\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 112, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": " mask = _matrix_mask(data, mask)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 114, "name": "masked_where", "kind": "ref", "category": "function", "info": " plot_data = np.ma.masked_where(np.asarray(mask), plot_data)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 120, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 122, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 129, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 131, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 140, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " self.xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 142, "name": "_skip_ticks", "kind": "ref", "category": 
"function", "info": " self.xticks, self.xticklabels = self._skip_ticks(xticklabels,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 150, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " self.yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 152, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " self.yticks, self.yticklabels = self._skip_ticks(yticklabels,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 156, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " xlabel = _index_to_label(data.columns)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 157, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " ylabel = _index_to_label(data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 162, "name": "_determine_cmap_params", "kind": "ref", "category": "function", "info": " self._determine_cmap_params(plot_data, vmin, vmax,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 191, "name": "_determine_cmap_params", "kind": "def", "category": "function", "info": " def _determine_cmap_params(self, plot_data, vmin, vmax,\n cmap, center, robust):\n \"\"\"Use some heuristics to set good defaults for colorbar and range.\"\"\"\n\n # plot_data is a np.ma.array instance\n calc_data = plot_data.astype(float).filled(np.nan)\n if vmin is None:\n if robust:\n vmin = np.nanpercentile(calc_data, 2)\n else:\n vmin = np.nanmin(calc_data)\n if vmax is None:\n if robust:\n vmax = np.nanpercentile(calc_data, 98)\n else:\n vmax = np.nanmax(calc_data)\n self.vmin, self.vmax = vmin, vmax\n\n # Choose default colormaps if not provided\n if cmap is None:\n if center is None:\n self.cmap = cm.rocket\n else:\n self.cmap = cm.icefire\n elif isinstance(cmap, str):\n self.cmap = get_colormap(cmap)\n elif isinstance(cmap, list):\n self.cmap = mpl.colors.ListedColormap(cmap)\n else:\n self.cmap = cmap\n\n # Recenter a divergent colormap\n if center is not None:\n\n # Copy bad values\n # in mpl<3.2 only masked values are honored with \"bad\" color spec\n # (see https://github.com/matplotlib/matplotlib/pull/14257)\n bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n\n # under/over values are set for sure when cmap extremes\n # do not map to the same color as +-inf\n under = self.cmap(-np.inf)\n over = self.cmap(np.inf)\n under_set = under != self.cmap(0)\n over_set = over != self.cmap(self.cmap.N - 1)\n\n vrange = max(vmax - center, center - vmin)\n normlize = mpl.colors.Normalize(center - vrange, center + vrange)\n cmin, cmax = normlize([vmin, vmax])\n cc = np.linspace(cmin, cmax, 256)\n self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n self.cmap.set_bad(bad)\n if under_set:\n self.cmap.set_under(under)\n if over_set:\n self.cmap.set_over(over)\n\n def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, 
np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 196, "name": "astype", 
"kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 196, "name": "filled", "kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 216, "name": "get_colormap", "kind": "ref", "category": "function", "info": " self.cmap = get_colormap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 218, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 228, "name": "cmap", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 228, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 232, "name": "cmap", "kind": "ref", "category": "function", "info": " under = self.cmap(-np.inf)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 233, "name": "cmap", "kind": "ref", "category": "function", "info": " over = self.cmap(np.inf)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 234, "name": "cmap", "kind": "ref", "category": "function", "info": " under_set = under != self.cmap(0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 235, "name": "cmap", "kind": "ref", "category": "function", "info": " over_set = over != self.cmap(self.cmap.N - 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 239, "name": "normlize", "kind": "ref", "category": "function", "info": " cmin, cmax = normlize([vmin, vmax])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "cmap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 242, "name": "set_bad", "kind": "ref", "category": "function", "info": " self.cmap.set_bad(bad)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 244, "name": "set_under", "kind": "ref", "category": "function", "info": " 
self.cmap.set_under(under)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 246, "name": "set_over", "kind": "ref", "category": "function", "info": " self.cmap.set_over(over)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 248, "name": "_annotate_heatmap", "kind": "def", "category": "function", "info": " def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n 
xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 257, "name": "relative_luminance", "kind": "ref", "category": "function", "info": " lum = relative_luminance(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 264, "name": "_skip_ticks", "kind": "def", "category": "function", "info": " def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly 
rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 277, "name": "_auto_ticks", "kind": "def", "category": "function", "info": " def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if \"norm\" not in kws:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 279, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = ax.figure.dpi_scale_trans.inverted()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 280, "name": 
"transformed", "kind": "ref", "category": "function", "info": " bbox = ax.get_window_extent().transformed(transform)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 283, "name": "set_ticks", "kind": "ref", "category": "function", "info": " tick, = axis.set_ticks([0])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 284, "name": "get_size", "kind": "ref", "category": "function", "info": " fontsize = tick.label1.get_size()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 290, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " ticks, labels = self._skip_ticks(labels, tick_every)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 296, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 311, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 324, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 329, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 334, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(xticklabels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 335, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 339, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 341, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 343, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 351, "name": "_annotate_heatmap", "kind": "ref", "category": "function", "info": " self._annotate_heatmap(ax, mesh)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 354, 
"name": "heatmap", "kind": "def", "category": "function", "info": "def heatmap(\n data, *,\n vmin=None, vmax=None, cmap=None, center=None, robust=False,\n annot=None, fmt=\".2g\", annot_kws=None,\n linewidths=0, linecolor=\"white\",\n cbar=True, cbar_kws=None, cbar_ax=None,\n square=False, xticklabels=\"auto\", yticklabels=\"auto\",\n mask=None, ax=None,\n **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 445, "name": "_HeatMapper", "kind": "ref", "category": "function", "info": " plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 457, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect(\"equal\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 462, "name": "_DendrogramPlotter", "kind": "def", "category": "class", "info": "__init__\t_calculate_linkage_scipy\t_calculate_linkage_fastcluster\tcalculated_linkage\tcalculate_dendrogram\treordered_ind\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 497, "name": "calculate_dendrogram", "kind": "ref", "category": "function", "info": " self.dendrogram = self.calculate_dendrogram()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 503, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " ticklabels = _index_to_ticklabels(self.data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 511, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.ylabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 519, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.xlabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 528, "name": "_calculate_linkage_scipy", "kind": "def", "category": "function", "info": " def _calculate_linkage_scipy(self):\n linkage = hierarchy.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 533, "name": "_calculate_linkage_fastcluster", "kind": "def", "category": "function", "info": " def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except 
ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 542, "name": "linkage_vector", "kind": "ref", "category": "function", "info": " return fastcluster.linkage_vector(self.array,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 551, "name": "calculated_linkage", "kind": "def", "category": "function", "info": " def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.product(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 554, "name": "_calculate_linkage_fastcluster", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_fastcluster()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 561, "name": "_calculate_linkage_scipy", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_scipy()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 563, "name": "calculate_dendrogram", "kind": "def", "category": "function", "info": " def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n 
-------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 580, "name": "reordered_ind", "kind": "def", "category": "function", "info": " def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n 
ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 604, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 609, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 613, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, number_of_leaves * 10)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 614, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 616, "name": "invert_xaxis", "kind": "ref", "category": "function", "info": " ax.invert_xaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 617, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 621, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, number_of_leaves * 10)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 622, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 624, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, bottom=True, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 628, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(self.xticklabels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 629, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 632, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 634, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 636, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 686, "name": "_DendrogramPlotter", "kind": "ref", "category": "function", "info": " plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 695, "name": "ClusterGrid", "kind": "def", "category": "class", "info": "__init__\t_preprocess_colors\tformat_data\tz_score\tstandard_scale\tdim_ratios\tcolor_list_to_matrix_and_cmap\tplot_dendrograms\tplot_colors\tplot_matrix\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 709, "name": "format_data", "kind": "ref", "category": "function", "info": " self.data2d = self.format_data(self.data, pivot_kws, z_score,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 712, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": " self.mask = _matrix_mask(self.data2d, mask)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 717, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, row_colors, axis=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 719, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, col_colors, axis=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 731, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " width_ratios = self.dim_ratios(self.row_colors,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 734, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " height_ratios = self.dim_ratios(self.col_colors,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 745, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 746, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 747, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 748, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 754, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_colors = self._figure.add_subplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 757, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_colors = self._figure.add_subplot(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 760, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 766, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 773, "name": "_preprocess_colors", "kind": "def", "category": "function", "info": " def _preprocess_colors(self, data, colors, axis):\n \"\"\"Preprocess {row/col}_colors to extract labels and convert colors.\"\"\"\n labels = None\n\n if colors is not None:\n if isinstance(colors, (pd.DataFrame, pd.Series)):\n\n # If data is unindexed, raise\n if (not hasattr(data, \"index\") and axis == 0) or (\n not hasattr(data, \"columns\") and axis == 1\n ):\n axis_name = \"col\" if axis else \"row\"\n msg = (f\"{axis_name}_colors indices can't be matched with data \"\n f\"indices. Provide {axis_name}_colors as a non-indexed \"\n \"datatype, e.g. 
by using `.to_numpy()``\")\n raise TypeError(msg)\n\n # Ensure colors match data indices\n if axis == 0:\n colors = colors.reindex(data.index)\n else:\n colors = colors.reindex(data.columns)\n\n # Replace na's with white color\n # TODO We should set these to transparent instead\n colors = colors.astype(object).fillna('white')\n\n # Extract color values and labels from frame/series\n if isinstance(colors, pd.DataFrame):\n labels = list(colors.columns)\n colors = colors.T.values\n else:\n if colors.name is None:\n labels = [\"\"]\n else:\n labels = [colors.name]\n colors = colors.values\n\n colors = _convert_colors(colors)\n\n return colors, labels\n\n def format_data(self, data, pivot_kws, z_score=None,\n standard_scale=None):\n \"\"\"Extract variables from data or use directly.\"\"\"\n\n # Either the data is already in 2d matrix format, or need to do a pivot\n if pivot_kws is not None:\n data2d = data.pivot(**pivot_kws)\n else:\n data2d = data\n\n if z_score is not None and standard_scale is not None:\n raise ValueError(\n 'Cannot perform both z-scoring and standard-scaling on data')\n\n if z_score is not None:\n data2d = self.z_score(data2d, z_score)\n if standard_scale is not None:\n data2d = self.standard_scale(data2d, standard_scale)\n return data2d\n\n @staticmethod\n def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 792, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.index)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 794, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.columns)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 798, "name": "astype", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 798, "name": "fillna", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 811, "name": "_convert_colors", "kind": "ref", "category": "function", "info": " colors = _convert_colors(colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 815, "name": "format_data", "kind": "def", "category": "function", "info": " def format_data(self, data, pivot_kws, z_score=None,\n standard_scale=None):\n \"\"\"Extract variables from data or use directly.\"\"\"\n\n # Either the data is already in 2d matrix format, or need to do a pivot\n if pivot_kws is not None:\n data2d = data.pivot(**pivot_kws)\n else:\n data2d = data\n\n if z_score is not None and standard_scale is not None:\n raise ValueError(\n 'Cannot perform both z-scoring and standard-scaling on data')\n\n if z_score is not None:\n data2d = self.z_score(data2d, z_score)\n if standard_scale is not None:\n data2d = self.standard_scale(data2d, standard_scale)\n return data2d\n\n @staticmethod\n def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n 
self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, 
**kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 830, "name": "z_score", "kind": "ref", "category": "function", "info": " data2d = self.z_score(data2d, z_score)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 832, "name": "standard_scale", "kind": "ref", "category": "function", "info": " data2d = self.standard_scale(data2d, standard_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 836, "name": "z_score", "kind": "def", "category": "function", "info": " def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 866, "name": "standard_scale", "kind": "def", "category": "function", "info": " def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n 
matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", 
\"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 899, "name": "dim_ratios", "kind": "def", "category": "function", "info": " def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the 
original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n 
xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 918, "name": "color_list_to_matrix_and_cmap", "kind": "def", "category": "function", "info": " def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors 
will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get 
col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 942, "name": "to_rgb", "kind": "ref", "category": "function", "info": " mpl.colors.to_rgb(colors[0])\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 966, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(list(unique_colors))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 969, "name": "plot_dendrograms", "kind": "def", "category": "function", "info": " def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = 
np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 979, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_xticks([])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 980, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_yticks([])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 989, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_xticks([])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 990, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_yticks([])\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 991, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 992, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 994, "name": "plot_colors", "kind": "def", "category": "function", "info": " def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting 
ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1020, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1029, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1034, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1036, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_row_colors, left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1040, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1049, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1054, "name": "tick_right", "kind": "ref", "category": "function", "info": " self.ax_col_colors.yaxis.tick_right()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1055, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1057, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_col_colors, left=True, bottom=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1059, "name": "plot_matrix", "kind": "def", "category": "function", "info": " def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = 
np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1092, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1096, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1097, "name": "get_rotation", "kind": "ref", "category": "function", "info": " ytl_rot = None if not ytl else ytl[0].get_rotation()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1098, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1099, "name": "set_label_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_label_position('right')\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1101, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1111, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1113, "name": "set_axis_on", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_on()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1114, "name": "set_position", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_position(self.cbar_pos)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1128, "name": "plot_dendrograms", "kind": "ref", "category": "function", "info": " self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1140, "name": "plot_colors", "kind": "ref", "category": "function", "info": " self.plot_colors(xind, yind, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1141, "name": "plot_matrix", "kind": "ref", "category": "function", "info": " self.plot_matrix(colorbar_kws, xind, yind, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1145, "name": "clustermap", "kind": "def", 
"category": "function", "info": "def clustermap(\n data, *,\n pivot_kws=None, method='average', metric='euclidean',\n z_score=None, standard_scale=None, figsize=(10, 10),\n cbar_kws=None, row_cluster=True, col_cluster=True,\n row_linkage=None, col_linkage=None,\n row_colors=None, col_colors=None, mask=None,\n dendrogram_ratio=.2, colors_ratio=0.03,\n cbar_pos=(.02, .8, .05, .18), tree_kws=None,\n **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1251, "name": "ClusterGrid", "kind": "ref", "category": "function", "info": " plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 8, "name": "palplot", "kind": "def", "category": "function", "info": "def palplot(pal, size=1):\n \"\"\"Plot the values in a color palette as a horizontal array.\n\n Parameters\n ----------\n pal : sequence of matplotlib colors\n colors, i.e. as returned by seaborn.color_palette()\n size :\n scaling factor for size of plot\n\n \"\"\"\n n = len(pal)\n f, ax = plt.subplots(1, 1, figsize=(n * size, size))\n ax.imshow(np.arange(n).reshape(1, n),\n cmap=mpl.colors.ListedColormap(list(pal)),\n interpolation=\"nearest\", aspect=\"auto\")\n ax.set_xticks(np.arange(n) - .5)\n ax.set_yticks([-.5, .5])\n # Ensure nice border between colors\n ax.set_xticklabels([\"\" for _ in range(n)])\n # The proper way to set no ticks\n ax.yaxis.set_major_locator(ticker.NullLocator())\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 22, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap=mpl.colors.ListedColormap(list(pal)),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 24, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(n) - .5)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 25, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks([-.5, .5])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 27, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels([\"\" for _ in range(n)])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 29, "name": "set_major_locator", "kind": "ref", "category": "function", "info": " ax.yaxis.set_major_locator(ticker.NullLocator())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 32, "name": "dogplot", "kind": "def", "category": "function", "info": "def dogplot(*_, **__):\n \"\"\"Who's a good boy?\"\"\"\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen\n from io import BytesIO\n\n url = \"https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png\"\n pic = np.random.randint(2, 7)\n data = BytesIO(urlopen(url.format(pic)).read())\n img = plt.imread(data)\n f, ax = plt.subplots(figsize=(5, 5), dpi=100)\n f.subplots_adjust(0, 0, 1, 1)\n ax.imshow(img)\n 
ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 41, "name": "randint", "kind": "ref", "category": "function", "info": " pic = np.random.randint(2, 7)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 47, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 60, "name": "_ColorPalette", "kind": "def", "category": "class", "info": "__enter__\t__exit__\tas_hex\t_repr_html_"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 62, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n \"\"\"Open the context.\"\"\"\n from .rcmod import set_palette\n self._orig_palette = color_palette()\n set_palette(self)\n return self\n\n def __exit__(self, *args):\n \"\"\"Close the context.\"\"\"\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n for i, c in enumerate(self.as_hex()):\n html += (\n f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n 'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n )\n html += '</svg>'\n return html\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 65, "name": "color_palette", "kind": "ref", "category": "function", "info": " self._orig_palette = color_palette()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 66, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 69, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, *args):\n \"\"\"Close the context.\"\"\"\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n for i, c in enumerate(self.as_hex()):\n html += (\n f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n 'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n )\n html += '</svg>'\n return html\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 72, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(self._orig_palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 74, "name": "as_hex", "kind": "def", "category": "function", "info": " def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the 
color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n for i, c in enumerate(self.as_hex()):\n html += (\n f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n 'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n )\n html += '</svg>'\n return html\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 76, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 77, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(hex)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 79, "name": "_repr_html_", "kind": "def", "category": "function", "info": " def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n for i, c in enumerate(self.as_hex()):\n html += (\n f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n 'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n )\n html += '</svg>'\n return html\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 84, "name": "as_hex", "kind": "ref", "category": "function", "info": " for i, c in enumerate(self.as_hex()):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 93, "name": "_patch_colormap_display", "kind": "def", "category": "function", "info": "def _patch_colormap_display():\n \"\"\"Simplify the rich display of matplotlib color maps in a notebook.\"\"\"\n def _repr_png_(self):\n \"\"\"Generate a PNG representation of the Colormap.\"\"\"\n import io\n from PIL import Image\n import numpy as np\n IMAGE_SIZE = (400, 50)\n X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))\n pixels = self(X, bytes=True)\n png_bytes = io.BytesIO()\n Image.fromarray(pixels).save(png_bytes, format='png')\n return png_bytes.getvalue()\n\n def _repr_html_(self):\n \"\"\"Generate an HTML representation of the Colormap.\"\"\"\n import base64\n png_bytes = self._repr_png_()\n png_base64 = base64.b64encode(png_bytes).decode('ascii')\n return ('<img alt=\"' + self.name + ' color map\" title=\"' + self.name + '\" src=\"data:image/png;base64,' + png_base64 + '\">')\n\n mpl.colors.Colormap._repr_png_ = _repr_png_\n mpl.colors.Colormap._repr_html_ = _repr_html_\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 95, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self):\n \"\"\"Generate a PNG representation of the Colormap.\"\"\"\n import io\n from PIL import Image\n import numpy as np\n IMAGE_SIZE = (400, 50)\n X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))\n pixels = self(X, bytes=True)\n png_bytes = io.BytesIO()\n Image.fromarray(pixels).save(png_bytes, format='png')\n return png_bytes.getvalue()\n\n def _repr_html_(self):\n \"\"\"Generate an HTML representation of the Colormap.\"\"\"\n import base64\n png_bytes = self._repr_png_()\n png_base64 = base64.b64encode(png_bytes).decode('ascii')\n return ('<img alt=\"' + self.name + ' color map\" title=\"' + self.name + '\" src=\"data:image/png;base64,' + png_base64 + '\">')\n\n mpl.colors.Colormap._repr_png_ = _repr_png_\n mpl.colors.Colormap._repr_html_ = _repr_html_\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 102, "name": "self", "kind": "ref", "category": "function", "info": " pixels = self(X, bytes=True)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 105, "name": "getvalue", "kind": "ref", "category": "function", "info": " return png_bytes.getvalue()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 107, "name": "_repr_html_", "kind": "def", "category": "function", "info": " def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f'<svg width=\"{n * s}\" height=\"{s}\">'\n for i, c in enumerate(self.as_hex()):\n html += (\n f'<rect x=\"{i * s}\" y=\"0\" width=\"{s}\" height=\"{s}\" style=\"fill:{c};'\n 'stroke-width:2;stroke:rgb(255,255,255)\"/>'\n )\n html += '</svg>'\n return html\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 110, "name": "_repr_png_", "kind": "ref", "category": "function", "info": " png_bytes = self._repr_png_()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 121, "name": "color_palette", "kind": "def", "category": "function", "info": "def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):\n \"\"\"Return a list of colors or continuous colormap defining a palette.\n\n Possible ``palette`` values include:\n - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)\n - Name of matplotlib colormap\n - 'husl' or 'hls'\n - 'ch:<cubehelix arguments>'\n - 'light:<color>', 'dark:<color>', 'blend:<color>,<color>',\n - A sequence of colors in any format matplotlib accepts\n\n Calling this function with ``palette=None`` will return the current\n matplotlib color cycle.\n\n This function can also be used in a ``with`` statement to temporarily\n set the color cycle for a plot or set of plots.\n\n See the :ref:`tutorial <palette_tutorial>` for more information.\n\n Parameters\n ----------\n palette : None, string, or sequence, optional\n Name of palette or None to return current palette. If a sequence, input\n colors are used but possibly cycled and desaturated.\n n_colors : int, optional\n Number of colors in the palette. If ``None``, the default will depend\n on how ``palette`` is specified. Named palettes default to 6 colors,\n but grabbing the current palette or passing in a list of colors will\n not change the number of colors unless this is specified. Asking for\n more colors than exist in the palette will cause it to cycle. Ignored\n when ``as_cmap`` is True.\n desat : float, optional\n Proportion to desaturate each color by.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n set_palette : Set the default color cycle for all plots.\n set_color_codes : Reassign color codes like ``\"b\"``, ``\"g\"``, etc. to\n colors from one of the seaborn palettes.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/color_palette.rst\n\n \"\"\"\n if palette is None:\n palette = get_color_cycle()\n if n_colors is None:\n n_colors = len(palette)\n\n elif not isinstance(palette, str):\n palette = palette\n if n_colors is None:\n n_colors = len(palette)\n else:\n\n if n_colors is None:\n # Use all colors in a qualitative palette or 6 of another kind\n n_colors = QUAL_PALETTE_SIZES.get(palette, 6)\n\n if palette in SEABORN_PALETTES:\n # Named \"seaborn variant\" of matplotlib default color cycle\n palette = SEABORN_PALETTES[palette]\n\n elif palette == \"hls\":\n # Evenly spaced colors in cylindrical RGB space\n palette = hls_palette(n_colors, as_cmap=as_cmap)\n\n elif palette == \"husl\":\n # Evenly spaced colors in cylindrical Lab space\n palette = husl_palette(n_colors, as_cmap=as_cmap)\n\n elif palette.lower() == \"jet\":\n # Paternalism\n raise ValueError(\"No.\")\n\n elif palette.startswith(\"ch:\"):\n # Cubehelix palette with params specified in string\n args, kwargs = _parse_cubehelix_args(palette)\n palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n\n elif palette.startswith(\"light:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"dark:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"blend:\"):\n # blend palette between colors specified in string\n _, colors = palette.split(\":\")\n colors = colors.split(\",\")\n palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n\n else:\n try:\n # Perhaps a named matplotlib colormap?\n palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n except (ValueError, KeyError): # Error class changed in mpl36\n raise ValueError(f\"{palette!r} is not a valid palette name\")\n\n if desat is not None:\n palette = [desaturate(c, desat) for c in palette]\n\n if not as_cmap:\n\n # Always return as many colors as we asked for\n pal_cycle = cycle(palette)\n palette = [next(pal_cycle) for _ in range(n_colors)]\n\n # Always return in r, g, b tuple format\n try:\n palette = map(mpl.colors.colorConverter.to_rgb, palette)\n palette = _ColorPalette(palette)\n except ValueError:\n raise ValueError(f\"Could not generate a palette for {palette}\")\n\n return palette\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 174, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " palette = get_color_cycle()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 194, "name": "hls_palette", "kind": "ref", "category": "function", "info": " palette = hls_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 198, "name": "husl_palette", "kind": "ref", "category": "function", "info": " palette = husl_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 206, "name": "_parse_cubehelix_args", 
"kind": "ref", "category": "function", "info": " args, kwargs = _parse_cubehelix_args(palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 207, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 215, "name": "light_palette", "kind": "ref", "category": "function", "info": " palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 223, "name": "dark_palette", "kind": "ref", "category": "function", "info": " palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 229, "name": "blend_palette", "kind": "ref", "category": "function", "info": " palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 234, "name": "mpl_palette", "kind": "ref", "category": "function", "info": " palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 239, "name": "desaturate", "kind": "ref", "category": "function", "info": " palette = [desaturate(c, desat) for c in palette]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 250, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " palette = _ColorPalette(palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 257, "name": "hls_palette", "kind": "def", "category": "function", "info": "def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa\n \"\"\"\n Return hues with constant lightness and saturation in the HLS system.\n\n The hues are evenly sampled along a circular path. The resulting palette will be\n appropriate for categorical or cyclical data.\n\n The `h`, `l`, and `s` values should be between 0 and 1.\n\n .. note::\n While the separation of the resulting colors will be mathematically\n constant, the HLS system does not construct a perceptually-uniform space,\n so their apparent intensity will vary.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n h : float\n The value of the first hue.\n l : float\n The lightness value.\n s : float\n The saturation intensity.\n as_cmap : bool\n If True, return a matplotlib colormap object.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n husl_palette : Make a palette using evenly spaced hues in the HUSL system.\n\n Examples\n --------\n .. 
include:: ../docstrings/hls_palette.rst\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues -= hues.astype(int)\n palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hls\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 303, "name": "astype", "kind": "ref", "category": "function", "info": " hues -= hues.astype(int)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 306, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hls\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 308, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 311, "name": "husl_palette", "kind": "def", "category": "function", "info": "def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa\n \"\"\"\n Return hues with constant lightness and saturation in the HUSL system.\n\n The hues are evenly sampled along a circular path. The resulting palette will be\n appropriate for categorical or cyclical data.\n\n The `h`, `l`, and `s` values should be between 0 and 1.\n\n This function is similar to :func:`hls_palette`, but it uses a nonlinear color\n space that is more perceptually uniform.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n h : float\n The value of the first hue.\n l : float\n The lightness value.\n s : float\n The saturation intensity.\n as_cmap : bool\n If True, return a matplotlib colormap object.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n hls_palette : Make a palette using evenly spaced hues in the HSL system.\n\n Examples\n --------\n .. 
include:: ../docstrings/husl_palette.rst\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues *= 359\n s *= 99\n l *= 99 # noqa\n palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hsl\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 358, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 360, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hsl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 362, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 365, "name": "mpl_palette", "kind": "def", "category": "function", "info": "def mpl_palette(name, n_colors=6, as_cmap=False):\n \"\"\"\n Return a palette or colormap from the matplotlib registry.\n\n For continuous palettes, evenly-spaced discrete samples are chosen while\n excluding the minimum and maximum value in the colormap to provide better\n contrast at the extremes.\n\n For qualitative palettes (e.g. those from colorbrewer), exact values are\n indexed (rather than interpolated), but fewer than `n_colors` can be returned\n if the palette does not define that many.\n\n Parameters\n ----------\n name : string\n Name of the palette. This should be a named matplotlib colormap.\n n_colors : int\n Number of discrete colors in the palette.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n Examples\n --------\n .. 
include: ../docstrings/mpl_palette.rst\n\n \"\"\"\n if name.endswith(\"_d\"):\n sub_name = name[:-2]\n if sub_name.endswith(\"_r\"):\n reverse = True\n sub_name = sub_name[:-2]\n else:\n reverse = False\n pal = color_palette(sub_name, 2) + [\"#333333\"]\n if reverse:\n pal = pal[::-1]\n cmap = blend_palette(pal, n_colors, as_cmap=True)\n else:\n cmap = get_colormap(name)\n\n if name in MPL_QUAL_PALS:\n bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]\n else:\n bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]\n palette = list(map(tuple, cmap(bins)[:, :3]))\n\n if as_cmap:\n return cmap\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 400, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal = color_palette(sub_name, 2) + [\"#333333\"]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 403, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(pal, n_colors, as_cmap=True)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 405, "name": "get_colormap", "kind": "ref", "category": "function", "info": " cmap = get_colormap(name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 411, "name": "cmap", "kind": "ref", "category": "function", "info": " palette = list(map(tuple, cmap(bins)[:, :3]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 416, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 419, "name": "_color_to_rgb", "kind": "def", "category": "function", "info": "def _color_to_rgb(color, input):\n \"\"\"Add some more flexibility to color choices.\"\"\"\n if input == \"hls\":\n color = colorsys.hls_to_rgb(*color)\n elif input == \"husl\":\n color = husl.husl_to_rgb(*color)\n color = tuple(np.clip(color, 0, 1))\n elif input == \"xkcd\":\n color = xkcd_rgb[color]\n\n return mpl.colors.to_rgb(color)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 424, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " color = husl.husl_to_rgb(*color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 429, "name": "to_rgb", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgb(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 432, "name": "dark_palette", "kind": "def", "category": "function", "info": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from dark to ``color``.\n\n This kind of palette is good for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options 
for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_dark_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex, rgb-tuple, or html color name\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n input : {'rgb', 'hls', 'husl', xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n .. include:: ../docstrings/dark_palette.rst\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 15\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 475, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 476, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 478, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 480, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 483, "name": "light_palette", "kind": "def", "category": "function", "info": "def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from light to ``color``.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. 
You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using a Jupyter notebook, you can also choose this palette\n interactively with the :func:`choose_light_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex code, html color name, or tuple in `input` space.\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n input : {'rgb', 'hls', 'husl', xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n .. include:: ../docstrings/light_palette.rst\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 95\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 523, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 524, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 526, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 528, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 531, "name": "diverging_palette", "kind": "def", "category": "function", "info": "def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, # noqa\n center=\"light\", as_cmap=False):\n \"\"\"Make a diverging palette between two HUSL colors.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_diverging_palette` function.\n\n Parameters\n ----------\n h_neg, h_pos : float in [0, 359]\n Anchor hues for negative and positive extents of the map.\n s : float in [0, 100], optional\n Anchor saturation for both extents of the map.\n l : float in [0, 100], optional\n Anchor lightness for both extents of the map.\n sep : int, optional\n Size of the intermediate region.\n n : int, optional\n Number of colors in the palette (if not returning a cmap)\n center : {\"light\", \"dark\"}, optional\n Whether the center of the palette is light or dark\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n palette\n list of 
RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark values.\n light_palette : Create a sequential palette with light values.\n\n Examples\n --------\n .. include: ../docstrings/diverging_palette.rst\n\n \"\"\"\n palfunc = dict(dark=dark_palette, light=light_palette)[center]\n n_half = int(128 - (sep // 2))\n neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center]\n mid = midpoint * sep\n pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 572, "name": "palfunc", "kind": "ref", "category": "function", "info": " neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 573, "name": "palfunc", "kind": "ref", "category": "function", "info": " pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 576, "name": "blend_palette", "kind": "ref", "category": "function", "info": " pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 580, "name": "blend_palette", "kind": "def", "category": "function", "info": "def blend_palette(colors, n_colors=6, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a palette that blends between a list of colors.\n\n Parameters\n ----------\n colors : sequence of colors in various formats interpreted by `input`\n hex code, html color name, or tuple in `input` space.\n n_colors : int, optional\n Number of colors in the palette.\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n Examples\n --------\n .. 
include:: ../docstrings/blend_palette.rst\n\n \"\"\"\n colors = [_color_to_rgb(color, input) for color in colors]\n name = \"blend\"\n pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n if not as_cmap:\n rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n pal = _ColorPalette(map(tuple, rgb_array))\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 602, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " colors = [_color_to_rgb(color, input) for color in colors]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 604, "name": "from_list", "kind": "ref", "category": "function", "info": " pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 606, "name": "pal", "kind": "ref", "category": "function", "info": " rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 607, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " pal = _ColorPalette(map(tuple, rgb_array))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 611, "name": "xkcd_palette", "kind": "def", "category": "function", "info": "def xkcd_palette(colors):\n \"\"\"Make a palette with color names from the xkcd color survey.\n\n See xkcd for the full list of colors: https://xkcd.com/color/rgb/\n\n This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the `seaborn.xkcd_rgb` dictionary.\n\n Returns\n -------\n palette\n A list of colors as RGB tuples.\n\n See Also\n --------\n crayon_palette : Make a palette with Crayola crayon colors.\n\n \"\"\"\n palette = [xkcd_rgb[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 634, "name": "color_palette", "kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 637, "name": "crayon_palette", "kind": "def", "category": "function", "info": "def crayon_palette(colors):\n \"\"\"Make a palette with color names from Crayola crayons.\n\n Colors are taken from here:\n https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors\n\n This is just a simple wrapper around the `seaborn.crayons` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the `seaborn.crayons` dictionary.\n\n Returns\n -------\n palette\n A list of colors as RGB tuples.\n\n See Also\n --------\n xkcd_palette : Make a palette with named colors from the XKCD color survey.\n\n \"\"\"\n palette = [crayons[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 661, "name": "color_palette", 
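The three constructors tagged above (blend_palette, xkcd_palette, crayon_palette) all funnel into color_palette. A minimal usage sketch, assuming seaborn is importable and that the quoted names are actual keys of the seaborn.xkcd_rgb and seaborn.crayons dictionaries:

import seaborn as sns

# Interpolate six colors between two anchors; internally this builds a
# LinearSegmentedColormap and samples it at evenly spaced points.
blended = sns.blend_palette(["#4c72b0", "#dd8452"], n_colors=6)

# Simple dict lookups followed by color_palette(); the palette length
# defaults to the number of names passed in.
xkcd = sns.xkcd_palette(["windows blue", "amber", "faded green"])  # assumed keys
crayon = sns.crayon_palette(["Denim", "Razzmatazz"])  # assumed keys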
"kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 664, "name": "cubehelix_palette", "kind": "def", "category": "function", "info": "def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,\n light=.85, dark=.15, reverse=False, as_cmap=False):\n \"\"\"Make a sequential palette from the cubehelix system.\n\n This produces a colormap with linearly-decreasing (or increasing)\n brightness. That means that information will be preserved if printed to\n black and white or viewed by someone who is colorblind. \"cubehelix\" is\n also available as a matplotlib-based palette, but this function gives the\n user more control over the look of the palette and has a different set of\n defaults.\n\n In addition to using this function, it is also possible to generate a\n cubehelix palette generally in seaborn using a string starting with\n `ch:` and containing other parameters (e.g. `\"ch:s=.25,r=-.5\"`).\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n start : float, 0 <= start <= 3\n The hue value at the start of the helix.\n rot : float\n Rotations around the hue wheel over the range of the palette.\n gamma : float 0 <= gamma\n Nonlinearity to emphasize dark (gamma < 1) or light (gamma > 1) colors.\n hue : float, 0 <= hue <= 1\n Saturation of the colors.\n dark : float 0 <= dark <= 1\n Intensity of the darkest color in the palette.\n light : float 0 <= light <= 1\n Intensity of the lightest color in the palette.\n reverse : bool\n If True, the palette will go from dark to light.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n choose_cubehelix_palette : Launch an interactive widget to select cubehelix\n palette parameters.\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n\n References\n ----------\n Green, D. A. (2011). \"A colour scheme for the display of astronomical\n intensity images\". Bulletin of the Astromical Society of India, Vol. 39,\n p. 289-295.\n\n Examples\n --------\n .. 
include:: ../docstrings/cubehelix_palette.rst\n\n \"\"\"\n def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 723, "name": "get_color_function", "kind": "def", "category": "function", "info": " def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 725, "name": "color", "kind": "def", "category": "function", "info": " def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return 
_ColorPalette(pal)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 740, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"red\": get_color_function(-0.14861, 1.78277),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 741, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"green\": get_color_function(-0.29227, -0.90649),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 742, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"blue\": get_color_function(1.97294, 0.0),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 745, "name": "LinearSegmentedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 748, "name": "cmap", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 748, "name": "tolist", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 756, "name": "cmap", "kind": "ref", "category": "function", "info": " pal_256 = cmap(x_256)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 757, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 760, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 763, "name": "_parse_cubehelix_args", "kind": "def", "category": "function", "info": "def _parse_cubehelix_args(argstr):\n \"\"\"Turn stringified cubehelix params into args/kwargs.\"\"\"\n\n if argstr.startswith(\"ch:\"):\n argstr = argstr[3:]\n\n if argstr.endswith(\"_r\"):\n reverse = True\n argstr = argstr[:-2]\n else:\n reverse = False\n\n if not argstr:\n return [], {\"reverse\": reverse}\n\n all_args = argstr.split(\",\")\n\n args = [float(a.strip(\" \")) for a in all_args if \"=\" not in a]\n\n kwargs = [a.split(\"=\") for a in all_args if \"=\" in a]\n kwargs = {k.strip(\" \"): float(v.strip(\" \")) for k, v in kwargs}\n\n kwarg_map = dict(\n s=\"start\", r=\"rot\", g=\"gamma\",\n h=\"hue\", l=\"light\", d=\"dark\", # noqa: E741\n )\n\n kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}\n\n if reverse:\n kwargs[\"reverse\"] = True\n\n return args, kwargs\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 798, "name": "set_color_codes", 
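Per the _parse_cubehelix_args tag above, the "ch:" string shorthand maps s, r, g, h, l, d onto start, rot, gamma, hue, light, dark, and a trailing "_r" sets reverse=True. A short sketch of both spellings, assuming seaborn is importable:

import seaborn as sns

# Explicit keyword form
pal = sns.cubehelix_palette(n_colors=8, start=.5, rot=-.5, dark=.2, light=.9)

# Equivalent string form: color_palette() hands "ch:..." strings to
# _parse_cubehelix_args and forwards the result to cubehelix_palette.
pal2 = sns.color_palette("ch:s=.5,r=-.5,d=.2,l=.9", 8)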
"kind": "def", "category": "function", "info": "def set_color_codes(palette=\"deep\"):\n \"\"\"Change how matplotlib color shorthands are interpreted.\n\n Calling this will change how shorthand codes like \"b\" or \"g\"\n are interpreted by matplotlib in subsequent plots.\n\n Parameters\n ----------\n palette : {deep, muted, pastel, dark, bright, colorblind}\n Named seaborn palette to use as the source of colors.\n\n See Also\n --------\n set : Color codes can be set through the high-level seaborn style\n manager.\n set_palette : Color codes can also be set through the function that\n sets the matplotlib color cycle.\n\n \"\"\"\n if palette == \"reset\":\n colors = [\n (0., 0., 1.),\n (0., .5, 0.),\n (1., 0., 0.),\n (.75, 0., .75),\n (.75, .75, 0.),\n (0., .75, .75),\n (0., 0., 0.)\n ]\n elif not isinstance(palette, str):\n err = \"set_color_codes requires a named seaborn palette\"\n raise TypeError(err)\n elif palette in SEABORN_PALETTES:\n if not palette.endswith(\"6\"):\n palette = palette + \"6\"\n colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]\n else:\n err = f\"Cannot set colors with palette '{palette}'\"\n raise ValueError(err)\n\n for code, color in zip(\"bgrmyck\", colors):\n rgb = mpl.colors.colorConverter.to_rgb(color)\n mpl.colors.colorConverter.colors[code] = rgb\n mpl.colors.colorConverter.cache[code] = rgb\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 839, "name": "to_rgb", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 81, "name": "set_theme", "kind": "def", "category": "function", "info": "def set_theme(context=\"notebook\", style=\"darkgrid\", palette=\"deep\",\n font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):\n \"\"\"\n Set aspects of the visual theme for all matplotlib and seaborn plots.\n\n This function changes the global defaults for all plots using the\n matplotlib rcParams system. The themeing is decomposed into several distinct\n sets of parameter values.\n\n The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>`\n and :doc:`color palette <../tutorial/color_palettes>` tutorials.\n\n Parameters\n ----------\n context : string or dict\n Scaling parameters, see :func:`plotting_context`.\n style : string or dict\n Axes style parameters, see :func:`axes_style`.\n palette : string or sequence\n Color palette, see :func:`color_palette`.\n font : string\n Font family, see matplotlib font manager.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n rc : dict or None\n Dictionary of rc parameter mappings to override the above.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/set_theme.rst\n\n \"\"\"\n set_context(context, font_scale)\n set_style(style, rc={\"font.family\": font})\n set_palette(palette, color_codes=color_codes)\n if rc is not None:\n mpl.rcParams.update(rc)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 118, "name": "set_context", "kind": "ref", "category": "function", "info": " set_context(context, font_scale)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 119, "name": "set_style", "kind": "ref", "category": "function", "info": " set_style(style, rc={\"font.family\": font})\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 120, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(palette, color_codes=color_codes)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 131, "name": "set_theme", "kind": "ref", "category": "function", "info": " set_theme(*args, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 134, "name": "reset_defaults", "kind": "def", "category": "function", "info": "def reset_defaults():\n \"\"\"Restore all RC params to default settings.\"\"\"\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 139, "name": "reset_orig", "kind": "def", "category": "function", "info": "def reset_orig():\n \"\"\"Restore all RC params to original settings (respects custom rc).\"\"\"\n from . import _orig_rc_params\n mpl.rcParams.update(_orig_rc_params)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 145, "name": "axes_style", "kind": "def", "category": "function", "info": "def axes_style(style=None, rc=None):\n \"\"\"\n Get the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_style`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n style : None, dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/axes_style.rst\n\n \"\"\"\n if style is None:\n style_dict = {k: mpl.rcParams[k] for k in _style_keys}\n\n elif isinstance(style, dict):\n style_dict = style\n\n else:\n styles = [\"white\", \"dark\", \"whitegrid\", \"darkgrid\", \"ticks\"]\n if style not in styles:\n raise ValueError(f\"style must be one of {', '.join(styles)}\")\n\n # Define colors here\n dark_gray = \".15\"\n light_gray = \".8\"\n\n # Common parameters\n style_dict = {\n\n \"figure.facecolor\": \"white\",\n \"axes.labelcolor\": dark_gray,\n\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": dark_gray,\n \"ytick.color\": dark_gray,\n\n \"axes.axisbelow\": True,\n \"grid.linestyle\": \"-\",\n\n\n \"text.color\": dark_gray,\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\"Arial\", \"DejaVu Sans\", \"Liberation Sans\",\n \"Bitstream Vera Sans\", \"sans-serif\"],\n\n\n \"lines.solid_capstyle\": \"round\",\n \"patch.edgecolor\": \"w\",\n \"patch.force_edgecolor\": True,\n\n \"image.cmap\": \"rocket\",\n\n \"xtick.top\": False,\n \"ytick.right\": False,\n\n }\n\n # Set grid on or off\n if \"grid\" in style:\n style_dict.update({\n \"axes.grid\": True,\n })\n else:\n style_dict.update({\n \"axes.grid\": False,\n })\n\n # Set the color of the background, spines, and grids\n if style.startswith(\"dark\"):\n style_dict.update({\n\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"grid.color\": \"white\",\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style == \"whitegrid\":\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": light_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style in [\"white\", \"ticks\"]:\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": dark_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n # Show or hide the axes ticks\n if style == \"ticks\":\n style_dict.update({\n \"xtick.bottom\": True,\n \"ytick.left\": True,\n })\n else:\n style_dict.update({\n \"xtick.bottom\": False,\n \"ytick.left\": False,\n })\n\n # Remove entries that are not defined in the base list of valid keys\n # This lets us handle matplotlib <=/> 2.0\n style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _style_keys}\n style_dict.update(rc)\n\n # Wrap in an _AxesStyle object so this can be used in a with statement\n style_object = _AxesStyle(style_dict)\n\n return style_object\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 297, "name": "_AxesStyle", "kind": "ref", "category": "function", "info": " style_object = _AxesStyle(style_dict)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 302, "name": "set_style", "kind": "def", "category": "function", "info": "def set_style(style=None, rc=None):\n \"\"\"\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n 
whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n See :func:`axes_style` to get the parameter values.\n\n Parameters\n ----------\n style : dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_style.rst\n\n \"\"\"\n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 330, "name": "axes_style", "kind": "ref", "category": "function", "info": " style_object = axes_style(style, rc)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 334, "name": "plotting_context", "kind": "def", "category": "function", "info": "def plotting_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Get the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_context`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n context : None, dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/plotting_context.rst\n\n \"\"\"\n if context is None:\n context_dict = {k: mpl.rcParams[k] for k in _context_keys}\n\n elif isinstance(context, dict):\n context_dict = context\n\n else:\n\n contexts = [\"paper\", \"notebook\", \"talk\", \"poster\"]\n if context not in contexts:\n raise ValueError(f\"context must be in {', '.join(contexts)}\")\n\n # Set up dictionary of default parameters\n texts_base_context = {\n\n \"font.size\": 12,\n \"axes.labelsize\": 12,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 11,\n \"ytick.labelsize\": 11,\n \"legend.fontsize\": 11,\n \"legend.title_fontsize\": 12,\n\n }\n\n base_context = {\n\n \"axes.linewidth\": 1.25,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.5,\n \"lines.markersize\": 6,\n \"patch.linewidth\": 1,\n\n \"xtick.major.width\": 1.25,\n \"ytick.major.width\": 1.25,\n \"xtick.minor.width\": 1,\n \"ytick.minor.width\": 1,\n\n \"xtick.major.size\": 6,\n \"ytick.major.size\": 6,\n \"xtick.minor.size\": 4,\n \"ytick.minor.size\": 4,\n\n }\n base_context.update(texts_base_context)\n\n # Scale all the parameters by the same factor depending on the context\n scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]\n context_dict = {k: v * scaling for k, v in base_context.items()}\n\n # Now independently scale the fonts\n font_keys = texts_base_context.keys()\n font_dict = {k: context_dict[k] * font_scale for k in font_keys}\n context_dict.update(font_dict)\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _context_keys}\n context_dict.update(rc)\n\n # Wrap in a _PlottingContext object so this can be used in a with statement\n context_object = _PlottingContext(context_dict)\n\n return context_object\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 430, "name": "_PlottingContext", "kind": "ref", "category": "function", "info": " context_object = _PlottingContext(context_dict)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 435, "name": "set_context", "kind": "def", "category": "function", "info": "def set_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Set the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n See :func:`plotting_context` to get the parameter values.\n\n Parameters\n ----------\n context : dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/set_context.rst\n\n \"\"\"\n context_object = plotting_context(context, font_scale, rc)\n mpl.rcParams.update(context_object)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 468, "name": "plotting_context", "kind": "ref", "category": "function", "info": " context_object = plotting_context(context, font_scale, rc)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 472, "name": "_RCAesthetics", "kind": "def", "category": "class", "info": "__enter__\t__exit__\t__call__"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 473, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n rc = mpl.rcParams\n self._orig = {k: rc[k] for k in self._keys}\n self._set(self)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 476, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 478, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 479, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self._orig)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 483, "name": "wrapper", "kind": "def", "category": "function", "info": " def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 485, "name": "func", "kind": "ref", "category": "function", "info": " return func(*args, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 489, "name": "_AxesStyle", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 495, "name": "_PlottingContext", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 501, "name": "set_palette", "kind": "def", "category": "function", "info": "def set_palette(palette, n_colors=None, desat=None, color_codes=False):\n \"\"\"Set the matplotlib color cycle using a seaborn palette.\n\n Parameters\n ----------\n palette : seaborn color palette | matplotlib colormap | hls | husl\n Palette definition. 
Should be something :func:`color_palette` can process.\n n_colors : int\n Number of colors in the cycle. The default number of colors will depend\n on the format of ``palette``, see the :func:`color_palette`\n documentation for more information.\n desat : float\n Proportion to desaturate each color by.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n\n See Also\n --------\n color_palette : build a color palette or set the color cycle temporarily\n in a ``with`` statement.\n set_context : set parameters to scale plot elements\n set_style : set the default parameters for figure style\n\n \"\"\"\n colors = palettes.color_palette(palette, n_colors, desat)\n cyl = cycler('color', colors)\n mpl.rcParams['axes.prop_cycle'] = cyl\n if color_codes:\n try:\n palettes.set_color_codes(palette)\n except (ValueError, TypeError):\n pass\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 526, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = palettes.color_palette(palette, n_colors, desat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 531, "name": "set_color_codes", "kind": "ref", "category": "function", "info": " palettes.set_color_codes(palette)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 24, "name": "_LinearPlotter", "kind": "def", "category": "class", "info": "establish_variables\tdropna\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 31, "name": "establish_variables", "kind": "def", "category": "function", "info": " def establish_variables(self, data, **kws):\n \"\"\"Extract variables from data or use directly.\"\"\"\n self.data = data\n\n # Validate the inputs\n any_strings = any([isinstance(v, str) for v in kws.values()])\n if any_strings and data is None:\n raise ValueError(\"Must pass `data` if using named variables.\")\n\n # Set the variables\n for var, val in kws.items():\n if isinstance(val, str):\n vector = data[val]\n elif isinstance(val, list):\n vector = np.asarray(val)\n else:\n vector = val\n if vector is not None and vector.shape != (1,):\n vector = np.squeeze(vector)\n if np.ndim(vector) > 1:\n err = \"regplot inputs must be 1d\"\n raise ValueError(err)\n setattr(self, var, vector)\n\n def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 55, "name": "dropna", "kind": "def", "category": "function", "info": " def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if 
val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 69, "name": "_RegressionPlotter", "kind": "def", "category": "class", "info": "__init__\tscatter_data\testimate_data\tfit_regression\tfit_fast\tfit_poly\tfit_statsmodels\tfit_lowess\tfit_logx\tbin_predictor\tregress_out\tplot\tscatterplot\tlineplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 106, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(data, x=x, y=y, units=units,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 111, "name": "dropna", "kind": "ref", "category": "function", "info": " self.dropna(\"x\", \"y\", \"units\", \"x_partial\", \"y_partial\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 115, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.x = self.regress_out(self.x, self.x_partial)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 117, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.y = self.regress_out(self.y, self.y_partial)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 122, "name": "bin_predictor", "kind": "ref", "category": "function", "info": " x_discrete, x_bins = self.bin_predictor(x_bins)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 136, "name": "scatter_data", "kind": "def", "category": "function", "info": " def scatter_data(self):\n \"\"\"Data where each observation is a point.\"\"\"\n x_j = self.x_jitter\n if x_j is None:\n x = self.x\n else:\n x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n\n y_j = self.y_jitter\n if y_j is None:\n y = self.y\n else:\n y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n\n return x, y\n\n @property\n def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci 
= self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n 
return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 142, "name": "uniform", "kind": "ref", "category": "function", "info": " x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 148, "name": "uniform", 
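The fit_fast method tagged above reduces the simple linear fit to two pseudo-inverse products. A self-contained numpy sketch of that core step, on made-up data (illustrative only, not seaborn's public API):

import numpy as np

rng = np.random.default_rng(0)
x = rng.uniform(0, 10, 50)
y = 2.0 + 0.5 * x + rng.normal(0, 1, 50)

# Design matrices with an intercept column, as fit_fast builds them
X = np.c_[np.ones(len(x)), x]
grid = np.linspace(x.min(), x.max(), 100)
G = np.c_[np.ones(len(grid)), grid]

# beta = pinv(X) @ y, then predictions evaluated on the grid
beta = np.linalg.pinv(X).dot(y)
yhat = G.dot(beta)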
"kind": "ref", "category": "function", "info": " y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 153, "name": "estimate_data", "kind": "def", "category": "function", "info": " def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n 
except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, 
**kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 163, "name": "x_estimator", "kind": "ref", "category": "function", "info": " est = self.x_estimator(_y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 177, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = algo.bootstrap(_y,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 182, "name": "ci", "kind": "ref", "category": "function", "info": " _ci = utils.ci(boots, self.x_ci)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 187, "name": "fit_regression", "kind": "def", "category": "function", "info": " def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n 
    def bin_predictor(self, bins):
        """Discretize a predictor by assigning each value to the closest bin."""
        x = np.asarray(self.x)
        if np.isscalar(bins):
            percentiles = np.linspace(0, 100, bins + 2)[1:-1]
            bins = np.percentile(x, percentiles)
        else:
            bins = np.ravel(bins)

        dist = np.abs(np.subtract.outer(x, bins))
        x_binned = bins[np.argmin(dist, axis=1)].ravel()

        return x_binned, bins
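# --- Illustrative sketch; not part of regression.py -------------------------
# bin_predictor maps every observation to its nearest bin center; when `bins`
# is a scalar, the centers are evenly spaced percentiles of x.  Made-up data:
import numpy as np

x = np.array([0.5, 1.4, 2.2, 3.9, 4.1, 6.8])
bins = np.array([1.0, 4.0])                 # two explicit bin centers
dist = np.abs(np.subtract.outer(x, bins))   # (n_points, n_bins) distances
x_binned = bins[np.argmin(dist, axis=1)]
# x_binned -> array([1., 1., 1., 4., 4., 4.])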
\"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 197, "name": "get_xlim", "kind": "ref", "category": "function", "info": " x_min, x_max = ax.get_xlim()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 203, "name": "fit_poly", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_poly(grid, self.order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 207, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 211, "name": "fit_lowess", "kind": "ref", "category": "function", "info": " grid, yhat = self.fit_lowess()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 214, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 216, "name": "fit_logx", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_logx(grid)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": 
"seaborn/regression.py", "line": 218, "name": "fit_fast", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_fast(grid)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 224, "name": "ci", "kind": "ref", "category": "function", "info": " err_bands = utils.ci(yhat_boots, ci, axis=0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 228, "name": "fit_fast", "kind": "def", "category": "function", "info": " def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.genmod.generalized_linear_model as glm\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n try:\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except glm.PerfectSeparationError:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n 
    def scatterplot(self, ax, kws):
        """Draw the data."""
        # Treat the line-based markers specially, explicitly setting a larger
        # linewidth than is provided by the seaborn style defaults.
        # This would ideally be handled better in matplotlib (i.e., by
        # distinguishing between edge width for solid glyphs and line width
        # for line glyphs), but this should do for now.
        line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
        if self.x_estimator is None:
            if "marker" in kws and kws["marker"] in line_markers:
                lw = mpl.rcParams["lines.linewidth"]
            else:
                lw = mpl.rcParams["lines.markeredgewidth"]
            kws.setdefault("linewidths", lw)

            if not hasattr(kws["color"], "shape") or kws["color"].shape[1] < 4:
                kws.setdefault("alpha", .8)

            x, y = self.scatter_data
            ax.scatter(x, y, **kws)
        else:
            # TODO abstraction
            ci_kws = {"color": kws["color"]}
            if "alpha" in kws:
                ci_kws["alpha"] = kws["alpha"]
            ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
            kws.setdefault("s", 50)

            xs, ys, cis = self.estimate_data
            if [ci for ci in cis if ci is not None]:
                for x, ci in zip(xs, cis):
                    ax.plot([x, x], ci, **ci_kws)
            ax.scatter(xs, ys, **kws)
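# --- Illustrative sketch; not part of regression.py -------------------------
# Stroke-only markers ("x", "+", "|", ...) are drawn entirely with lines, so
# their scatter `linewidths` falls back to lines.linewidth; filled markers
# only need the thinner lines.markeredgewidth for their outline:
import matplotlib as mpl

kws = {"marker": "x"}
line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
lw = (mpl.rcParams["lines.linewidth"]
      if kws.get("marker") in line_markers
      else mpl.rcParams["lines.markeredgewidth"])
kws.setdefault("linewidths", lw)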
    def lineplot(self, ax, kws):
        """Draw the model."""
        # Fit the regression model
        grid, yhat, err_bands = self.fit_regression(ax)
        edges = grid[0], grid[-1]

        # Set the default aesthetics
        fill_color = kws["color"]
        lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
        kws.setdefault("linewidth", lw)

        # Draw the regression line and confidence interval
        line, = ax.plot(grid, yhat, **kws)
        if not self.truncate:
            line.sticky_edges.x[:] = edges  # Prevent mpl from adding margin
        if err_bands is not None:
            ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)
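# --- Illustrative usage; assumes the public wrappers reach this plotter -----
# These methods back seaborn's regplot/lmplot.  A minimal call that exercises
# the polynomial branch (order -> fit_poly) and the bootstrap band drawn by
# lineplot's fill_between:
import matplotlib.pyplot as plt
import seaborn as sns

tips = sns.load_dataset("tips")     # example dataset shipped with seaborn
sns.regplot(x="total_bill", y="tip", data=tips, order=2, ci=95,
            truncate=False)
plt.show()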
mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 356, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 364, "name": "scatterplot", "kind": "ref", "category": "function", "info": " self.scatterplot(ax, scatter_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 367, "name": "lineplot", "kind": "ref", "category": "function", "info": " self.lineplot(ax, line_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 371, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.x.name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 373, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.y.name)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 375, "name": "scatterplot", "kind": "def", "category": "function", "info": " def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 409, "name": "lineplot", 
"kind": "def", "category": "function", "info": " def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 412, "name": "fit_regression", "kind": "ref", "category": "function", "info": " grid, yhat, err_bands = self.fit_regression(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 559, "name": "lmplot", "kind": "def", "category": "function", "info": "def lmplot(\n data=None, *,\n x=None, y=None, hue=None, col=None, row=None,\n palette=None, col_wrap=None, height=5, aspect=1, markers=\"o\",\n sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,\n legend=True, legend_out=None, x_estimator=None, x_bins=None,\n x_ci=\"ci\", scatter=True, fit_reg=True, ci=95, n_boot=1000,\n units=None, seed=None, order=1, logistic=False, lowess=False,\n robust=False, logx=False, x_partial=None, y_partial=None,\n truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,\n line_kws=None, facet_kws=None,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 575, "name": "facet_kw_deprecation", "kind": "def", "category": "function", "info": " def facet_kw_deprecation(key, val):\n msg = (\n f\"{key} is deprecated from the `lmplot` function signature. 
\"\n \"Please update your code to pass it using `facet_kws`.\"\n )\n if val is not None:\n warnings.warn(msg, UserWarning)\n facet_kws[key] = val\n\n facet_kw_deprecation(\"sharex\", sharex)\n facet_kw_deprecation(\"sharey\", sharey)\n facet_kw_deprecation(\"legend_out\", legend_out)\n\n if data is None:\n raise TypeError(\"Missing required keyword argument `data`.\")\n\n # Reduce the dataframe to only needed columns\n need_cols = [x, y, hue, col, row, units, x_partial, y_partial]\n cols = np.unique([a for a in need_cols if a is not None]).tolist()\n data = data[cols]\n\n # Initialize the grid\n facets = FacetGrid(\n data, row=row, col=col, hue=hue,\n palette=palette,\n row_order=row_order, col_order=col_order, hue_order=hue_order,\n height=height, aspect=aspect, col_wrap=col_wrap,\n **facet_kws,\n )\n\n # Add the markers here as FacetGrid has figured out how many levels of the\n # hue variable are needed and we don't want to duplicate that process\n if facets.hue_names is None:\n n_markers = 1\n else:\n n_markers = len(facets.hue_names)\n if not isinstance(markers, list):\n markers = [markers] * n_markers\n if len(markers) != n_markers:\n raise ValueError(\"markers must be a singleton or a list of markers \"\n \"for each level of the hue variable\")\n facets.hue_kws = {\"marker\": markers}\n\n def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in [col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 584, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"sharex\", sharex)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 585, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"sharey\", sharey)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 586, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"legend_out\", legend_out)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 593, "name": "tolist", "kind": "ref", "category": "function", "info": " cols = np.unique([a for a in need_cols if a is not None]).tolist()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 597, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " facets = FacetGrid(\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 618, "name": "update_datalim", "kind": "def", "category": "function", "info": " def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in [col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 619, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 619, "name": "astype", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 620, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xys, updatey=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 621, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scaley=False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 623, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(update_datalim, x=x, y=y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 634, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 635, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " facets.set_axis_labels(x, y)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 639, "name": "add_legend", "kind": "ref", "category": "function", "info": " facets.add_legend()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 735, "name": "regplot", "kind": "def", "category": "function", "info": "def regplot(\n data=None, *, x=None, y=None,\n x_estimator=None, x_bins=None, x_ci=\"ci\",\n scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,\n seed=None, order=1, logistic=False, lowess=False, 
robust=False,\n logx=False, x_partial=None, y_partial=None,\n truncate=True, dropna=True, x_jitter=None, y_jitter=None,\n label=None, color=None, marker=\"o\",\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 746, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 837, "name": "residplot", "kind": "def", "category": "function", "info": "def residplot(\n data=None, *, x=None, y=None,\n x_partial=None, y_partial=None, lowess=False,\n order=1, robust=False, dropna=True, label=None, color=None,\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 898, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, ci=None,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 907, "name": "fit_regression", "kind": "ref", "category": "function", "info": " _, yhat, _ = plotter.fit_regression(grid=plotter.x)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 24, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": "_relational_narrative = DocstringComponents(dict(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 175, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 177, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 178, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " rel=DocstringComponents(_relational_docs),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 179, "name": "from_function_params", "kind": "ref", "category": "function", "info": " stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 183, "name": "_RelationalPlotter", "kind": "def", "category": "class", "info": "add_legend_data"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 192, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax):\n \"\"\"Add labeled artists to represent the different plot semantics.\"\"\"\n verbosity = self.legend\n if isinstance(verbosity, str) and verbosity not in [\"auto\", \"brief\", \"full\"]:\n err = 
\"`legend` must be 'auto', 'brief', 'full', or a boolean.\"\n raise ValueError(err)\n elif verbosity is True:\n verbosity = \"auto\"\n\n legend_kwargs = {}\n keys = []\n\n # Assign a legend title if there is only going to be one sub-legend,\n # otherwise, subtitles will be inserted into the texts list with an\n # invisible handle (which is a hack)\n titles = {\n title for title in\n (self.variables.get(v, None) for v in [\"hue\", \"size\", \"style\"])\n if title is not None\n }\n if len(titles) == 1:\n legend_title = titles.pop()\n else:\n legend_title = \"\"\n\n title_kws = dict(\n visible=False, color=\"w\", s=0, linewidth=0, marker=\"\", dashes=\"\"\n )\n\n def update(var_name, val_name, **kws):\n\n key = var_name, val_name\n if key in legend_kwargs:\n legend_kwargs[key].update(**kws)\n else:\n keys.append(key)\n\n legend_kwargs[key] = dict(**kws)\n\n # Define the maximum number of ticks to use for \"brief\" legends\n brief_ticks = 6\n\n # -- Add a legend for hue semantics\n brief_hue = self._hue_map.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(self._hue_map.levels) > brief_ticks)\n )\n if brief_hue:\n if isinstance(self._hue_map.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n limits = min(self._hue_map.levels), max(self._hue_map.levels)\n hue_levels, hue_formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[\"hue\"].infer_objects().dtype\n )\n elif self._hue_map.levels is None:\n hue_levels = hue_formatted_levels = []\n else:\n hue_levels = hue_formatted_levels = self._hue_map.levels\n\n # Add the hue semantic subtitle\n if not legend_title and self.variables.get(\"hue\", None) is not None:\n update((self.variables[\"hue\"], \"title\"),\n self.variables[\"hue\"], **title_kws)\n\n # Add the hue semantic labels\n for level, formatted_level in zip(hue_levels, hue_formatted_levels):\n if level is not None:\n color = self._hue_map(level)\n update(self.variables[\"hue\"], formatted_level, color=color)\n\n # -- Add a legend for size semantics\n brief_size = self._size_map.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(self._size_map.levels) > brief_ticks)\n )\n if brief_size:\n # Define how ticks will interpolate between the min/max data values\n if isinstance(self._size_map.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n # Define the min/max data values\n limits = min(self._size_map.levels), max(self._size_map.levels)\n size_levels, size_formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[\"size\"].infer_objects().dtype\n )\n elif self._size_map.levels is None:\n size_levels = size_formatted_levels = []\n else:\n size_levels = size_formatted_levels = self._size_map.levels\n\n # Add the size semantic subtitle\n if not legend_title and self.variables.get(\"size\", None) is not None:\n update((self.variables[\"size\"], \"title\"),\n self.variables[\"size\"], **title_kws)\n\n # Add the size semantic labels\n for level, formatted_level in zip(size_levels, size_formatted_levels):\n if level is not None:\n size = self._size_map(level)\n update(\n self.variables[\"size\"],\n formatted_level,\n linewidth=size,\n s=size,\n )\n\n # -- Add a legend for style semantics\n\n # Add the style semantic title\n if not legend_title and self.variables.get(\"style\", None) is 
not None:\n update((self.variables[\"style\"], \"title\"),\n self.variables[\"style\"], **title_kws)\n\n # Add the style semantic labels\n if self._style_map.levels is not None:\n for level in self._style_map.levels:\n if level is not None:\n attrs = self._style_map(level)\n update(\n self.variables[\"style\"],\n level,\n marker=attrs.get(\"marker\", \"\"),\n dashes=attrs.get(\"dashes\", \"\"),\n )\n\n func = getattr(ax, self._legend_func)\n\n legend_data = {}\n legend_order = []\n\n for key in keys:\n\n _, label = key\n kws = legend_kwargs[key]\n kws.setdefault(\"color\", \".2\")\n use_kws = {}\n for attr in self._legend_attributes + [\"visible\"]:\n if attr in kws:\n use_kws[attr] = kws[attr]\n artist = func([], [], label=label, **use_kws)\n if self._legend_func == \"plot\":\n artist = artist[0]\n legend_data[key] = artist\n legend_order.append(key)\n\n self.legend_title = legend_title\n self.legend_data = legend_data\n self.legend_order = legend_order\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 245, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", "info": " hue_levels, hue_formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 246, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, self.plot_data[\"hue\"].infer_objects().dtype\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 261, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 277, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", "info": " size_levels, size_formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 278, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, self.plot_data[\"size\"].infer_objects().dtype\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 293, "name": "_size_map", "kind": "ref", "category": "function", "info": " size = self._size_map(level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 312, "name": "_style_map", "kind": "ref", "category": "function", "info": " attrs = self._style_map(level)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 334, "name": "func", "kind": "ref", "category": "function", "info": " artist = func([], [], label=label, **use_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 345, "name": "_LinePlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 403, "name": "EstimateAggregator", "kind": "ref", 
"category": "function", "info": " agg = EstimateAggregator(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 422, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 427, "name": "sort_values", "kind": "ref", "category": "function", "info": " sub_data = sub_data.sort_values(sort_cols)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 437, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped = sub_data.groupby(orient, sort=self.sort)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 440, "name": "apply", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, other).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 440, "name": "reset_index", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, other).reset_index()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 447, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 455, "name": "groupby", "kind": "ref", "category": "function", "info": " for _, unit_data in sub_data.groupby(\"units\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 463, "name": "set_color", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 463, "name": "_hue_map", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 466, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 466, "name": "_size_map", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 469, "name": "_style_map", "kind": "ref", "category": "function", "info": " attributes = self._style_map(sub_vars[\"style\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 471, "name": "set_dashes", "kind": "ref", "category": "function", "info": " line.set_dashes(attributes[\"dashes\"])\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 473, "name": "set_marker", "kind": "ref", "category": "function", "info": " line.set_marker(attributes[\"marker\"])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 475, "name": "get_color", "kind": "ref", "category": "function", "info": " line_color = line.get_color()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 476, "name": "get_alpha", "kind": "ref", "category": "function", "info": " line_alpha = line.get_alpha()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 477, "name": "get_solid_capstyle", "kind": "ref", "category": "function", "info": " line_capstyle = line.get_solid_capstyle()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 488, "name": "func", "kind": "ref", "category": "function", "info": " func(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 509, "name": "get_children", "kind": "ref", "category": "function", "info": " for obj in ebars.get_children():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 511, "name": "set_capstyle", "kind": "ref", "category": "function", "info": " obj.set_capstyle(line_capstyle)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 514, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 516, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 517, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 520, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 523, "name": "_ScatterPlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 545, "name": "dropna", "kind": "ref", "category": "function", "info": " data = self.plot_data.dropna()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 559, "name": "_style_map", "kind": "ref", "category": "function", "info": " example_marker = self._style_map(example_level, \"marker\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 567, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 568, "name": "is_filled", "kind": "ref", "category": "function", "info": " if m.is_filled():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 577, "name": "set_facecolors", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 577, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 580, "name": "set_sizes", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 580, "name": "_size_map", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 583, "name": "_style_map", "kind": "ref", "category": "function", "info": " p = [self._style_map(val, \"path\") for val in data[\"style\"]]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 584, "name": "set_paths", "kind": "ref", "category": "function", "info": " points.set_paths(p)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 589, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 590, "name": "set_linewidths", "kind": "ref", "category": "function", "info": " points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 593, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 595, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 596, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 599, "name": "adjust_legend_subtitles", 
"kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 602, "name": "lineplot", "kind": "def", "category": "function", "info": "def lineplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n dashes=True, markers=None, style_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, seed=None,\n orient=\"x\", sort=True, err_style=\"band\", err_kws=None,\n legend=\"auto\", ci=\"deprecated\", ax=None, **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 614, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = _deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 616, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = _LinePlotter.get_semantics(locals())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 617, "name": "_LinePlotter", "kind": "ref", "category": "function", "info": " p = _LinePlotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 624, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 625, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 626, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, dashes=dashes, order=style_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 637, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 642, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 731, "name": "scatterplot", "kind": "def", "category": "function", "info": "def scatterplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=True, style_order=None, legend=\"auto\", ax=None,\n **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 740, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = 
_ScatterPlotter.get_semantics(locals())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 741, "name": "_ScatterPlotter", "kind": "ref", "category": "function", "info": " p = _ScatterPlotter(data=data, variables=variables, legend=legend)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 743, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 744, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 745, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, order=style_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 753, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 758, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 824, "name": "relplot", "kind": "def", "category": "function", "info": "def relplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n row=None, col=None, col_wrap=None, row_order=None, col_order=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=None, dashes=None, style_order=None,\n legend=\"auto\", kind=\"scatter\", height=5, aspect=1, facet_kws=None,\n **kwargs\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 861, "name": "plotter", "kind": "ref", "category": "function", "info": " p = plotter(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 863, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=plotter.get_semantics(locals()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 866, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 867, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 868, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, 
dashes=dashes, order=style_order)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 886, "name": "_style_map", "kind": "ref", "category": "function", "info": " markers = {k: p._style_map(k, \"marker\") for k in style_order}\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 890, "name": "_style_map", "kind": "ref", "category": "function", "info": " dashes = {k: p._style_map(k, \"dashes\") for k in style_order}\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 915, "name": "assign_variables", "kind": "ref", "category": "function", "info": " p.assign_variables(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 941, "name": "rename", "kind": "ref", "category": "function", "info": " full_data = p.plot_data.rename(columns=new_cols)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 945, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 946, "name": "dropna", "kind": "ref", "category": "function", "info": " data=full_data.dropna(axis=1, how=\"all\"),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 954, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(func, **plot_kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 958, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(variables.get(\"x\") or \"\", variables.get(\"y\") or \"\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 965, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " p.add_legend_data(g.axes.flat[0])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 967, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(legend_data=p.legend_data,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 977, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = g.data.rename(columns=orig_cols)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 23, "name": "ci_to_errsize", "kind": "def", "category": "function", "info": "def ci_to_errsize(cis, heights):\n \"\"\"Convert intervals to error arguments relative to plot heights.\n\n Parameters\n ----------\n cis : 2 x n sequence\n sequence of confidence interval limits\n heights : n sequence\n sequence of plot heights\n\n Returns\n -------\n errsize : 2 x n array\n sequence of error size relative to height values in correct\n format as argument for plt.bar\n\n \"\"\"\n cis = 
np.atleast_2d(cis).reshape(2, -1)\n heights = np.atleast_1d(heights)\n errsize = []\n for i, (low, high) in enumerate(np.transpose(cis)):\n h = heights[i]\n elow = h - low\n ehigh = high - h\n errsize.append([elow, ehigh])\n\n errsize = np.asarray(errsize).T\n return errsize\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 53, "name": "_normal_quantile_func", "kind": "def", "category": "function", "info": "def _normal_quantile_func(q):\n \"\"\"\n Compute the quantile function of the standard normal distribution.\n\n This wrapper exists because we are dropping scipy as a mandatory dependency\n but statistics.NormalDist was added to the standard library in 3.8.\n\n \"\"\"\n try:\n from statistics import NormalDist\n qf = np.vectorize(NormalDist().inv_cdf)\n except ImportError:\n try:\n from scipy.stats import norm\n qf = norm.ppf\n except ImportError:\n msg = (\n \"Standard normal quantile functions require either Python>=3.8 or scipy\"\n )\n raise RuntimeError(msg)\n return qf(q)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 73, "name": "qf", "kind": "ref", "category": "function", "info": " return qf(q)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 76, "name": "_draw_figure", "kind": "def", "category": "function", "info": "def _draw_figure(fig):\n \"\"\"Force draw of a matplotlib figure, accounting for back-compat.\"\"\"\n # See https://github.com/matplotlib/matplotlib/issues/19197 for context\n fig.canvas.draw()\n if fig.stale:\n try:\n fig.draw(fig.canvas.get_renderer())\n except AttributeError:\n pass\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 82, "name": "get_renderer", "kind": "ref", "category": "function", "info": " fig.draw(fig.canvas.get_renderer())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 87, "name": "_default_color", "kind": "def", "category": "function", "info": "def _default_color(method, hue, color, kws):\n \"\"\"If needed, get a default color by using the matplotlib property cycle.\"\"\"\n\n if hue is not None:\n # This warning is probably user-friendly, but it's currently triggered\n # in a FacetGrid context and I don't want to mess with that logic right now\n # if color is not None:\n # msg = \"`color` is ignored when `hue` is assigned.\"\n # warnings.warn(msg)\n return None\n\n kws = kws.copy()\n kws.pop(\"label\", None)\n\n if color is not None:\n return color\n\n elif method.__name__ == \"plot\":\n\n color = _normalize_kwargs(kws, mpl.lines.Line2D).get(\"color\")\n scout, = method([], [], scalex=False, scaley=False, color=color)\n color = scout.get_color()\n scout.remove()\n\n elif method.__name__ == \"scatter\":\n\n # Matplotlib will raise if the size of x/y don't match s/c,\n # and the latter might be in the kws dict\n scout_size = max(\n np.atleast_1d(kws.get(key, [])).shape[0]\n for key in [\"s\", \"c\", \"fc\", \"facecolor\", \"facecolors\"]\n )\n scout_x = scout_y = np.full(scout_size, np.nan)\n\n scout = method(scout_x, scout_y, **kws)\n facecolors = scout.get_facecolors()\n\n if not len(facecolors):\n # Handle bug in matplotlib <= 3.2 (I think)\n # This will limit the ability to use non color= kwargs to specify\n # a color in 
versions of matplotlib with the bug, but trying to\n # work out what the user wanted by re-implementing the broken logic\n # of inspecting the kwargs is probably too brittle.\n single_color = False\n else:\n single_color = np.unique(facecolors, axis=0).shape[0] == 1\n\n # Allow the user to specify an array of colors through various kwargs\n if \"c\" not in kws and single_color:\n color = to_rgb(facecolors[0])\n\n scout.remove()\n\n elif method.__name__ == \"bar\":\n\n # bar() needs masked, not empty data, to generate a patch\n scout, = method([np.nan], [np.nan], **kws)\n color = to_rgb(scout.get_facecolor())\n scout.remove()\n\n elif method.__name__ == \"fill_between\":\n\n # There is a bug on matplotlib < 3.3 where fill_between with\n # datetime units and empty data will set incorrect autoscale limits\n # To workaround it, we'll always return the first color in the cycle.\n # https://github.com/matplotlib/matplotlib/issues/17586\n ax = method.__self__\n datetime_axis = any([\n isinstance(ax.xaxis.converter, mpl.dates.DateConverter),\n isinstance(ax.yaxis.converter, mpl.dates.DateConverter),\n ])\n if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n return \"C0\"\n\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n\n scout = method([], [], **kws)\n facecolor = scout.get_facecolor()\n color = to_rgb(facecolor[0])\n scout.remove()\n\n return color\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 106, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " color = _normalize_kwargs(kws, mpl.lines.Line2D).get(\"color\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 107, "name": "method", "kind": "ref", "category": "function", "info": " scout, = method([], [], scalex=False, scaley=False, color=color)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 108, "name": "get_color", "kind": "ref", "category": "function", "info": " color = scout.get_color()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 121, "name": "method", "kind": "ref", "category": "function", "info": " scout = method(scout_x, scout_y, **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 122, "name": "get_facecolors", "kind": "ref", "category": "function", "info": " facecolors = scout.get_facecolors()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 143, "name": "method", "kind": "ref", "category": "function", "info": " scout, = method([np.nan], [np.nan], **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 144, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " color = to_rgb(scout.get_facecolor())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 158, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": 
"seaborn/utils.py", "line": 158, "name": "Version", "kind": "ref", "category": "function", "info": " if Version(mpl.__version__) < Version(\"3.3\") and datetime_axis:\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 161, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 163, "name": "method", "kind": "ref", "category": "function", "info": " scout = method([], [], **kws)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 164, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " facecolor = scout.get_facecolor()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 171, "name": "desaturate", "kind": "def", "category": "function", "info": "def desaturate(color, prop):\n \"\"\"Decrease the saturation channel of a color by some percent.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n prop : float\n saturation channel of color will be multiplied by this value\n\n Returns\n -------\n new_color : rgb tuple\n desaturated color code in RGB tuple representation\n\n \"\"\"\n # Check inputs\n if not 0 <= prop <= 1:\n raise ValueError(\"prop must be between 0 and 1\")\n\n # Get rgb tuple rep\n rgb = to_rgb(color)\n\n # Convert to hls\n h, l, s = colorsys.rgb_to_hls(*rgb)\n\n # Desaturate the saturation channel\n s *= prop\n\n # Convert back to rgb\n new_color = colorsys.hls_to_rgb(h, l, s)\n\n return new_color\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 206, "name": "saturate", "kind": "def", "category": "function", "info": "def saturate(color):\n \"\"\"Return a fully saturated color with the same hue.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n\n Returns\n -------\n new_color : rgb tuple\n saturated color code in RGB tuple representation\n\n \"\"\"\n return set_hls_values(color, s=1)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 220, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " return set_hls_values(color, s=1)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 223, "name": "set_hls_values", "kind": "def", "category": "function", "info": "def set_hls_values(color, h=None, l=None, s=None): # noqa\n \"\"\"Independently manipulate the h, l, or s channels of a color.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n h, l, s : floats between 0 and 1, or None\n new values for each channel in hls space\n\n Returns\n -------\n new_color : rgb tuple\n new color code in RGB tuple representation\n\n \"\"\"\n # Get an RGB tuple representation\n rgb = to_rgb(color)\n vals = list(colorsys.rgb_to_hls(*rgb))\n for i, val in enumerate([h, l, s]):\n if val is not None:\n vals[i] = val\n\n rgb = colorsys.hls_to_rgb(*vals)\n return rgb\n\n\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 250, "name": "axlabel", "kind": "def", "category": "function", "info": "def axlabel(xlabel, ylabel, **kwargs):\n \"\"\"Grab current axis and label it.\n\n DEPRECATED: will be removed in a future version.\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg, FutureWarning)\n ax = plt.gca()\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 259, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 260, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 263, "name": "remove_na", "kind": "def", "category": "function", "info": "def remove_na(vector):\n \"\"\"Helper method for removing null values from data vectors.\n\n Parameters\n ----------\n vector : vector object\n Must implement boolean masking with [] subscript syntax.\n\n Returns\n -------\n clean_clean : same type as ``vector``\n Vector of data with null values removed. May be a copy or a view.\n\n \"\"\"\n return vector[pd.notnull(vector)]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 280, "name": "get_color_cycle", "kind": "def", "category": "function", "info": "def get_color_cycle():\n \"\"\"Return the list of colors in the current matplotlib color cycle\n\n Parameters\n ----------\n None\n\n Returns\n -------\n colors : list\n List of matplotlib colors in the current cycle, or dark gray if\n the current color cycle is empty.\n \"\"\"\n cycler = mpl.rcParams['axes.prop_cycle']\n return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 294, "name": "by_key", "kind": "ref", "category": "function", "info": " return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 297, "name": "despine", "kind": "def", "category": "function", "info": "def despine(fig=None, ax=None, top=True, right=True, left=False,\n bottom=False, offset=None, trim=False):\n \"\"\"Remove the top and right spines from plot(s).\n\n fig : matplotlib figure, optional\n Figure to despine all axes of, defaults to the current figure.\n ax : matplotlib axes, optional\n Specific axes object to despine. Ignored if fig is provided.\n top, right, left, bottom : boolean, optional\n If True, remove that spine.\n offset : int or dict, optional\n Absolute distance, in points, spines should be moved away\n from the axes (negative values move spines inward). 
A single value\n applies to all spines; a dict can be used to set offset values per\n side.\n trim : bool, optional\n If True, limit spines to the smallest and largest major tick\n on each non-despined axis.\n\n Returns\n -------\n None\n\n \"\"\"\n # Get references to the axes we want\n if fig is None and ax is None:\n axes = plt.gcf().axes\n elif fig is not None:\n axes = fig.axes\n elif ax is not None:\n axes = [ax]\n\n for ax_i in axes:\n for side in [\"top\", \"right\", \"left\", \"bottom\"]:\n # Toggle the spine objects\n is_visible = not locals()[side]\n ax_i.spines[side].set_visible(is_visible)\n if offset is not None and is_visible:\n try:\n val = offset.get(side, 0)\n except AttributeError:\n val = offset\n ax_i.spines[side].set_position(('outward', val))\n\n # Potentially move the ticks\n if left and not right:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.minorTicks\n )\n ax_i.yaxis.set_ticks_position(\"right\")\n for t in ax_i.yaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.yaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if bottom and not top:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.minorTicks\n )\n ax_i.xaxis.set_ticks_position(\"top\")\n for t in ax_i.xaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.xaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if trim:\n # clip off the parts of the spines that extend past major ticks\n xticks = np.asarray(ax_i.get_xticks())\n if xticks.size:\n firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n xticks)[0]\n lasttick = np.compress(xticks <= max(ax_i.get_xlim()),\n xticks)[-1]\n ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n ax_i.spines['top'].set_bounds(firsttick, lasttick)\n newticks = xticks.compress(xticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_xticks(newticks)\n\n yticks = np.asarray(ax_i.get_yticks())\n if yticks.size:\n firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n yticks)[0]\n lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n yticks)[-1]\n ax_i.spines['left'].set_bounds(firsttick, lasttick)\n ax_i.spines['right'].set_bounds(firsttick, lasttick)\n newticks = yticks.compress(yticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_yticks(newticks)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 333, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax_i.spines[side].set_visible(is_visible)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 339, "name": "set_position", "kind": "ref", "category": "function", "info": " ax_i.spines[side].set_position(('outward', val))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 344, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 348, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 351, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax_i.yaxis.set_ticks_position(\"right\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 353, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(maj_on)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 355, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(min_on)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 359, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 363, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 366, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax_i.xaxis.set_ticks_position(\"top\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 368, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(maj_on)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 370, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(min_on)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 374, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = np.asarray(ax_i.get_xticks())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 376, "name": "get_xlim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 378, "name": "get_xlim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(xticks <= max(ax_i.get_xlim()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 380, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 381, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['top'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 384, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax_i.set_xticks(newticks)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", 
"rel_fname": "seaborn/utils.py", "line": 386, "name": "get_yticks", "kind": "ref", "category": "function", "info": " yticks = np.asarray(ax_i.get_yticks())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 388, "name": "get_ylim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 390, "name": "get_ylim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 392, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['left'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 393, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['right'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 396, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax_i.set_yticks(newticks)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 399, "name": "move_legend", "kind": "def", "category": "function", "info": "def move_legend(obj, loc, **kwargs):\n \"\"\"\n Recreate a plot's legend at a new location.\n\n The name is a slight misnomer. Matplotlib legends do not expose public\n control over their position parameters. So this function creates a new legend,\n copying over the data from the original object, which is then removed.\n\n Parameters\n ----------\n obj : the object with the plot\n This argument can be either a seaborn or matplotlib object:\n\n - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`\n - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`\n\n loc : str or int\n Location argument, as in :meth:`matplotlib.axes.Axes.legend`.\n\n kwargs\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/move_legend.rst\n\n \"\"\"\n # This is a somewhat hackish solution that will hopefully be obviated by\n # upstream improvements to matplotlib legends that make them easier to\n # modify after creation.\n\n from seaborn.axisgrid import Grid # Avoid circular import\n\n # Locate the legend object and a method to recreate the legend\n if isinstance(obj, Grid):\n old_legend = obj.legend\n legend_func = obj.figure.legend\n elif isinstance(obj, mpl.axes.Axes):\n old_legend = obj.legend_\n legend_func = obj.legend\n elif isinstance(obj, mpl.figure.Figure):\n if obj.legends:\n old_legend = obj.legends[-1]\n else:\n old_legend = None\n legend_func = obj.legend\n else:\n err = \"`obj` must be a seaborn Grid or matplotlib Axes or Figure instance.\"\n raise TypeError(err)\n\n if old_legend is None:\n err = f\"{obj} has no legend attached.\"\n raise ValueError(err)\n\n # Extract the components of the legend we need to reuse\n handles = old_legend.legendHandles\n labels = [t.get_text() for t in old_legend.get_texts()]\n\n # Extract legend properties that can be passed to the recreation method\n # (Vexingly, these don't all round-trip)\n legend_kws = inspect.signature(mpl.legend.Legend).parameters\n props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n\n # Delegate default bbox_to_anchor rules to matplotlib\n props.pop(\"bbox_to_anchor\")\n\n # Try to propagate the existing title and font properties; respect new ones too\n title = props.pop(\"title\")\n if \"title\" in kwargs:\n title.set_text(kwargs.pop(\"title\"))\n title_kwargs = {k: v for k, v in kwargs.items() if k.startswith(\"title_\")}\n for key, val in title_kwargs.items():\n title.set(**{key[6:]: val})\n kwargs.pop(key)\n\n # Try to respect the frame visibility\n kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n\n # Remove the old legend and create the new one\n props.update(kwargs)\n old_legend.remove()\n new_legend = legend_func(handles, labels, loc=loc, **props)\n new_legend.set_title(title.get_text(), title.get_fontproperties())\n\n # Let the Grid object continue to track the correct legend object\n if isinstance(obj, Grid):\n obj._legend = new_legend\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 456, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 456, "name": "get_texts", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 461, "name": "properties", "kind": "ref", "category": "function", "info": " props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 469, "name": "set_text", "kind": "ref", "category": "function", "info": " title.set_text(kwargs.pop(\"title\"))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 476, "name": "get_visible", "kind": "ref", "category": "function", "info": " kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 481, "name": "legend_func", "kind": "ref", "category": "function", "info": " new_legend = legend_func(handles, labels, loc=loc, **props)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 482, "name": "set_title", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 482, "name": "get_text", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 482, "name": "get_fontproperties", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 489, "name": "_kde_support", "kind": "def", "category": "function", "info": "def _kde_support(data, bw, gridsize, cut, clip):\n \"\"\"Establish support for a kernel density estimate.\"\"\"\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n support = np.linspace(support_min, support_max, gridsize)\n\n return support\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 498, "name": "ci", "kind": "def", "category": "function", "info": "def ci(a, which=95, axis=None):\n \"\"\"Return a percentile range from an array of values.\"\"\"\n p = 50 - which / 2, 50 + which / 2\n return np.nanpercentile(a, p, axis)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 504, "name": "get_dataset_names", "kind": "def", "category": "function", "info": "def get_dataset_names():\n \"\"\"Report available example datasets, useful for reporting issues.\n\n Requires an internet connection.\n\n \"\"\"\n url = \"https://github.com/mwaskom/seaborn-data\"\n with urlopen(url) as resp:\n html = resp.read()\n\n pat = r\"/mwaskom/seaborn-data/blob/master/(\\w*).csv\"\n datasets = re.findall(pat, html.decode())\n return datasets\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 515, "name": "decode", "kind": "ref", "category": "function", "info": " datasets = re.findall(pat, html.decode())\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 519, "name": "get_data_home", "kind": "def", "category": "function", "info": "def get_data_home(data_home=None):\n \"\"\"Return a path to the cache directory for example datasets.\n\n This directory is used by :func:`load_dataset`.\n\n If the ``data_home`` argument is not provided, it will use a directory\n specified by the `SEABORN_DATA` environment variable (if it exists)\n or otherwise default to an OS-appropriate user cache location.\n\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n data_home = os.path.expanduser(data_home)\n if not 
os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 530, "name": "user_cache_dir", "kind": "ref", "category": "function", "info": " data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 531, "name": "expanduser", "kind": "ref", "category": "function", "info": " data_home = os.path.expanduser(data_home)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 532, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(data_home):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 537, "name": "load_dataset", "kind": "def", "category": "function", "info": "def load_dataset(name, cache=True, data_home=None, **kws):\n \"\"\"Load an example dataset from the online repository (requires internet).\n\n This function provides quick access to a small number of example datasets\n that are useful for documenting seaborn or generating reproducible examples\n for bug reports. It is not necessary for normal usage.\n\n Note that some of the datasets have a small amount of preprocessing applied\n to define a proper ordering for categorical variables.\n\n Use :func:`get_dataset_names` to see a list of available datasets.\n\n Parameters\n ----------\n name : str\n Name of the dataset (``{name}.csv`` on\n https://github.com/mwaskom/seaborn-data).\n cache : boolean, optional\n If True, try to load from the local cache first, and save to the cache\n if a download is required.\n data_home : string, optional\n The directory in which to cache data; see :func:`get_data_home`.\n kws : keys and values, optional\n Additional keyword arguments are passed through to\n :func:`pandas.read_csv`.\n\n Returns\n -------\n df : :class:`pandas.DataFrame`\n Tabular data, possibly with some preprocessing applied.\n\n \"\"\"\n # A common beginner mistake is to assume that one's personal data needs\n # to be passed through this function to be usable with seaborn.\n # Let's provide a more helpful error than you would otherwise get.\n if isinstance(name, pd.DataFrame):\n err = (\n \"This function accepts only strings (the name of an example dataset). \"\n \"You passed a pandas DataFrame. 
If you have your own dataset, \"\n \"it is not necessary to use this function before plotting.\"\n )\n raise TypeError(err)\n\n url = f\"https://raw.githubusercontent.com/mwaskom/seaborn-data/master/{name}.csv\"\n\n if cache:\n cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n if not os.path.exists(cache_path):\n if name not in get_dataset_names():\n raise ValueError(f\"'{name}' is not one of the example datasets.\")\n urlretrieve(url, cache_path)\n full_path = cache_path\n else:\n full_path = url\n\n df = pd.read_csv(full_path, **kws)\n\n if df.iloc[-1].isnull().all():\n df = df.iloc[:-1]\n\n # Set some columns as a categorical type with ordered levels\n\n if name == \"tips\":\n df[\"day\"] = pd.Categorical(df[\"day\"], [\"Thur\", \"Fri\", \"Sat\", \"Sun\"])\n df[\"sex\"] = pd.Categorical(df[\"sex\"], [\"Male\", \"Female\"])\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"Lunch\", \"Dinner\"])\n df[\"smoker\"] = pd.Categorical(df[\"smoker\"], [\"Yes\", \"No\"])\n\n elif name == \"flights\":\n months = df[\"month\"].str[:3]\n df[\"month\"] = pd.Categorical(months, months.unique())\n\n elif name == \"exercise\":\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"1 min\", \"15 min\", \"30 min\"])\n df[\"kind\"] = pd.Categorical(df[\"kind\"], [\"rest\", \"walking\", \"running\"])\n df[\"diet\"] = pd.Categorical(df[\"diet\"], [\"no fat\", \"low fat\"])\n\n elif name == \"titanic\":\n df[\"class\"] = pd.Categorical(df[\"class\"], [\"First\", \"Second\", \"Third\"])\n df[\"deck\"] = pd.Categorical(df[\"deck\"], list(\"ABCDEFG\"))\n\n elif name == \"penguins\":\n df[\"sex\"] = df[\"sex\"].str.title()\n\n elif name == \"diamonds\":\n df[\"color\"] = pd.Categorical(\n df[\"color\"], [\"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n )\n df[\"clarity\"] = pd.Categorical(\n df[\"clarity\"], [\"IF\", \"VVS1\", \"VVS2\", \"VS1\", \"VS2\", \"SI1\", \"SI2\", \"I1\"],\n )\n df[\"cut\"] = pd.Categorical(\n df[\"cut\"], [\"Ideal\", \"Premium\", \"Very Good\", \"Good\", \"Fair\"],\n )\n\n elif name == \"taxis\":\n df[\"pickup\"] = pd.to_datetime(df[\"pickup\"])\n df[\"dropoff\"] = pd.to_datetime(df[\"dropoff\"])\n\n elif name == \"seaice\":\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n elif name == \"dowjones\":\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n return df\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 583, "name": "get_data_home", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 583, "name": "basename", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 584, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(cache_path):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 585, "name": "get_dataset_names", "kind": "ref", "category": "function", "info": " if name not in get_dataset_names():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 645, "name": 
"axis_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axis_ticklabels_overlap(labels):\n \"\"\"Return a boolean for whether the list of ticklabels have overlaps.\n\n Parameters\n ----------\n labels : list of matplotlib ticklabels\n\n Returns\n -------\n overlap : boolean\n True if any of the labels overlap.\n\n \"\"\"\n if not labels:\n return False\n try:\n bboxes = [l.get_window_extent() for l in labels]\n overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n return max(overlaps) > 1\n except RuntimeError:\n # Issue on macos backend raises an error in the above code\n return False\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 661, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " bboxes = [l.get_window_extent() for l in labels]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 662, "name": "count_overlaps", "kind": "ref", "category": "function", "info": " overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 669, "name": "axes_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axes_ticklabels_overlap(ax):\n \"\"\"Return booleans for whether the x and y ticklabels on an Axes overlap.\n\n Parameters\n ----------\n ax : matplotlib Axes\n\n Returns\n -------\n x_overlap, y_overlap : booleans\n True when the labels on that axis overlap.\n\n \"\"\"\n return (axis_ticklabels_overlap(ax.get_xticklabels()),\n axis_ticklabels_overlap(ax.get_yticklabels()))\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 682, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 682, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 683, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 683, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 686, "name": "locator_to_legend_entries", "kind": "def", "category": "function", "info": "def locator_to_legend_entries(locator, limits, dtype):\n \"\"\"Return levels and formatted levels for brief numeric legends.\"\"\"\n raw_levels = locator.tick_values(*limits).astype(dtype)\n\n # The locator can return ticks outside the limits, clip them here\n raw_levels = [l for l in raw_levels if l >= limits[0] and l <= limits[1]]\n\n class dummy_axis:\n def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n 
else:\n formatter = mpl.ticker.ScalarFormatter()\n # Avoid having an offset/scientific notation which we don't currently\n # have any way of representing in the legend\n formatter.set_useOffset(False)\n formatter.set_scientific(False)\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 688, "name": "tick_values", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 688, "name": "astype", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 693, "name": "dummy_axis", "kind": "def", "category": "class", "info": "get_view_interval"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 694, "name": "get_view_interval", "kind": "def", "category": "function", "info": " def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n else:\n formatter = mpl.ticker.ScalarFormatter()\n # Avoid having an offset/scientific notation which we don't currently\n # have any way of representing in the legend\n formatter.set_useOffset(False)\n formatter.set_scientific(False)\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 703, "name": "set_useOffset", "kind": "ref", "category": "function", "info": " formatter.set_useOffset(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 704, "name": "set_scientific", "kind": "ref", "category": "function", "info": " formatter.set_scientific(False)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 705, "name": "dummy_axis", "kind": "ref", "category": "function", "info": " formatter.axis = dummy_axis()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 710, "name": "set_locs", "kind": "ref", "category": "function", "info": " formatter.set_locs(raw_levels)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 711, "name": "formatter", "kind": "ref", "category": "function", "info": " formatted_levels = [formatter(x) for x in raw_levels]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", 
"line": 716, "name": "relative_luminance", "kind": "def", "category": "function", "info": "def relative_luminance(color):\n \"\"\"Calculate the relative luminance of a color according to W3C standards\n\n Parameters\n ----------\n color : matplotlib color or sequence of matplotlib colors\n Hex code, rgb-tuple, or html color name.\n\n Returns\n -------\n luminance : float(s) between 0 and 1\n\n \"\"\"\n rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)\n lum = rgb.dot([.2126, .7152, .0722])\n try:\n return lum.item()\n except ValueError:\n return lum\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 729, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 733, "name": "item", "kind": "ref", "category": "function", "info": " return lum.item()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 738, "name": "to_utf8", "kind": "def", "category": "function", "info": "def to_utf8(obj):\n \"\"\"Return a string representing a Python object.\n\n Strings (i.e. type ``str``) are returned unchanged.\n\n Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.\n\n For other objects, the method ``__str__()`` is called, and the result is\n returned as a string.\n\n Parameters\n ----------\n obj : object\n Any Python object\n\n Returns\n -------\n s : str\n UTF-8-decoded string representation of ``obj``\n\n \"\"\"\n if isinstance(obj, str):\n return obj\n try:\n return obj.decode(encoding=\"utf-8\")\n except AttributeError: # obj is not bytes-like\n return str(obj)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 762, "name": "decode", "kind": "ref", "category": "function", "info": " return obj.decode(encoding=\"utf-8\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 767, "name": "_normalize_kwargs", "kind": "def", "category": "function", "info": "def _normalize_kwargs(kws, artist):\n \"\"\"Wrapper for mpl.cbook.normalize_kwargs that supports <= 3.2.1.\"\"\"\n _alias_map = {\n 'color': ['c'],\n 'linewidth': ['lw'],\n 'linestyle': ['ls'],\n 'facecolor': ['fc'],\n 'edgecolor': ['ec'],\n 'markerfacecolor': ['mfc'],\n 'markeredgecolor': ['mec'],\n 'markeredgewidth': ['mew'],\n 'markersize': ['ms']\n }\n try:\n kws = normalize_kwargs(kws, artist)\n except AttributeError:\n kws = normalize_kwargs(kws, _alias_map)\n return kws\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 787, "name": "_check_argument", "kind": "def", "category": "function", "info": "def _check_argument(param, options, value):\n \"\"\"Raise if value for param is not in options.\"\"\"\n if value not in options:\n raise ValueError(\n f\"`{param}` must be one of {options}, but {repr(value)} was passed.\"\n )\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 795, "name": "_assign_default_kwargs", "kind": "def", "category": "function", 
"info": "def _assign_default_kwargs(kws, call_func, source_func):\n \"\"\"Assign default kwargs for call_func using values from source_func.\"\"\"\n # This exists so that axes-level functions and figure-level functions can\n # both call a Plotter method while having the default kwargs be defined in\n # the signature of the axes-level function.\n # An alternative would be to have a decorator on the method that sets its\n # defaults based on those defined in the axes-level function.\n # Then the figure-level function would not need to worry about defaults.\n # I am not sure which is better.\n needed = inspect.signature(call_func).parameters\n defaults = inspect.signature(source_func).parameters\n\n for param in needed:\n if param in defaults and param not in kws:\n kws[param] = defaults[param].default\n\n return kws\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 814, "name": "adjust_legend_subtitles", "kind": "def", "category": "function", "info": "def adjust_legend_subtitles(legend):\n \"\"\"\n Make invisible-handle \"subtitles\" entries look more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n \"\"\"\n # Legend title not in rcParams until 3.0\n font_size = plt.rcParams.get(\"legend.title_fontsize\", None)\n hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n for hpack in hpackers:\n draw_area, text_area = hpack.get_children()\n handles = draw_area.get_children()\n if not all(artist.get_visible() for artist in handles):\n draw_area.set_width(0)\n for text in text_area.get_children():\n if font_size is not None:\n text.set_size(font_size)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 823, "name": "get_children", "kind": "ref", "category": "function", "info": " hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 825, "name": "get_children", "kind": "ref", "category": "function", "info": " draw_area, text_area = hpack.get_children()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 826, "name": "get_children", "kind": "ref", "category": "function", "info": " handles = draw_area.get_children()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 827, "name": "get_visible", "kind": "ref", "category": "function", "info": " if not all(artist.get_visible() for artist in handles):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 828, "name": "set_width", "kind": "ref", "category": "function", "info": " draw_area.set_width(0)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 829, "name": "get_children", "kind": "ref", "category": "function", "info": " for text in text_area.get_children():\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 831, "name": "set_size", "kind": "ref", "category": "function", "info": " text.set_size(font_size)\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 834, "name": "_deprecate_ci", "kind": "def", "category": "function", "info": "def _deprecate_ci(errorbar, ci):\n \"\"\"\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n \"\"\"\n if ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n msg = (\n \"\\n\\nThe `ci` parameter is deprecated. \"\n f\"Use `errorbar={repr(errorbar)}` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return errorbar\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 860, "name": "_disable_autolayout", "kind": "def", "category": "function", "info": "def _disable_autolayout():\n \"\"\"Context manager for preventing rc-controlled auto-layout behavior.\"\"\"\n # This is a workaround for an issue in matplotlib, for details see\n # https://github.com/mwaskom/seaborn/issues/2914\n # The only affect of this rcParam is to set the default value for\n # layout= in plt.figure, so we could just do that instead.\n # But then we would need to own the complexity of the transition\n # from tight_layout=True -> layout=\"tight\". This seems easier,\n # but can be removed when (if) that is simpler on the matplotlib side,\n # or if the layout algorithms are improved to handle figure legends.\n orig_val = mpl.rcParams[\"figure.autolayout\"]\n try:\n mpl.rcParams[\"figure.autolayout\"] = False\n yield\n finally:\n mpl.rcParams[\"figure.autolayout\"] = orig_val\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 7, "name": "interact", "kind": "def", "category": "function", "info": " def interact(f):\n msg = \"Interactive palettes require `ipywidgets`, which is not installed.\"\n raise ImportError(msg)\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 21, "name": "_init_mutable_colormap", "kind": "def", "category": "function", "info": "def _init_mutable_colormap():\n \"\"\"Create a matplotlib colormap that will be updated by the widgets.\"\"\"\n greys = color_palette(\"Greys\", 256)\n cmap = LinearSegmentedColormap.from_list(\"interactive\", greys)\n cmap._init()\n cmap._set_extremes()\n return cmap\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 23, "name": "color_palette", "kind": "ref", "category": "function", "info": " greys = color_palette(\"Greys\", 256)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 30, "name": "_update_lut", "kind": "def", "category": "function", "info": "def _update_lut(cmap, colors):\n \"\"\"Change the LUT values in a matplotlib colormap in-place.\"\"\"\n cmap._lut[:256] = colors\n cmap._set_extremes()\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 36, "name": "_show_cmap", "kind": "def", "category": "function", "info": "def _show_cmap(cmap):\n 
\"\"\"Show a continuous matplotlib colormap.\"\"\"\n from .rcmod import axes_style # Avoid circular import\n with axes_style(\"white\"):\n f, ax = plt.subplots(figsize=(8.25, .75))\n ax.set(xticks=[], yticks=[])\n x = np.linspace(0, 1, 256)[np.newaxis, :]\n ax.pcolormesh(x, cmap=cmap)\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 39, "name": "axes_style", "kind": "ref", "category": "function", "info": " with axes_style(\"white\"):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 46, "name": "choose_colorbrewer_palette", "kind": "def", "category": "function", "info": "def choose_colorbrewer_palette(data_type, as_cmap=False):\n \"\"\"Select a palette from the ColorBrewer set.\n\n These palettes are built into matplotlib and can be used by name in\n many seaborn functions, or by passing the object returned by this function.\n\n Parameters\n ----------\n data_type : {'sequential', 'diverging', 'qualitative'}\n This describes the kind of data you want to visualize. See the seaborn\n color palette docs for more information about how to choose this value.\n Note that you can pass substrings (e.g. 'q' for 'qualitative.\n\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette from selected colors.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n\n \"\"\"\n if data_type.startswith(\"q\") and as_cmap:\n raise ValueError(\"Qualitative palettes cannot be colormaps.\")\n\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if data_type.startswith(\"s\"):\n opts = [\"Greys\", \"Reds\", \"Greens\", \"Blues\", \"Oranges\", \"Purples\",\n \"BuGn\", \"BuPu\", \"GnBu\", \"OrRd\", \"PuBu\", \"PuRd\", \"RdPu\", \"YlGn\",\n \"PuBuGn\", \"YlGnBu\", \"YlOrBr\", \"YlOrRd\"]\n variants = [\"regular\", \"reverse\", \"dark\"]\n\n @interact\n def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n 
pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 83, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 92, "name": "choose_sequential", "kind": "def", "category": "function", "info": " def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 93, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 101, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 102, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 103, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 105, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 106, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 114, "name": "choose_diverging", "kind": "def", "category": "function", "info": " def 
choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 115, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 120, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 121, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 122, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 124, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 125, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 132, "name": "choose_qualitative", "kind": "def", "category": "function", "info": " def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 133, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1)):\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 134, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 135, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 142, "name": "choose_dark_palette", "kind": "def", "category": "function", "info": "def 
choose_dark_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a dark sequential palette.\n\n This corresponds with the :func:`dark_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. Note that the default is\n different than the default input for :func:`dark_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 175, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 179, "name": "choose_dark_palette_rgb", "kind": "def", "category": "function", "info": " def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n 
pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 185, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"rgb\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 186, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 187, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 189, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 190, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 194, "name": "choose_dark_palette_hls", "kind": "def", "category": "function", "info": " def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 200, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 201, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 202, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 204, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 205, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": 
"seaborn/widgets.py", "line": 209, "name": "choose_dark_palette_husl", "kind": "def", "category": "function", "info": " def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 215, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 216, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 217, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 219, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 220, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 227, "name": "choose_light_palette", "kind": "def", "category": "function", "info": "def choose_light_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a light sequential palette.\n\n This corresponds with the :func:`light_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`light_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n dark_palette : Create a sequential palette with dark low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 260, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 264, "name": "choose_light_palette_rgb", "kind": "def", "category": "function", "info": " def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 270, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"rgb\")\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 271, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 272, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 274, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 275, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 279, "name": "choose_light_palette_hls", "kind": "def", "category": "function", "info": " def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 285, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 286, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 287, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 289, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 290, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 294, "name": "choose_light_palette_husl", "kind": "def", "category": "function", "info": " def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = 
light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 300, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 301, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 302, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 304, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 305, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 312, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": "def choose_diverging_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to choose a diverging color palette.\n\n This corresponds with the :func:`diverging_palette` function. This kind\n of palette is good for data that range between interesting low values\n and interesting high values with a meaningful midpoint. 
(For example,\n change scores relative to some baseline value).\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n diverging_palette : Create a diverging color palette or colormap.\n choose_colorbrewer_palette : Interactively choose palettes from the\n colorbrewer set, including diverging palettes.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 342, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 345, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": " def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 346, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_neg=IntSlider(min=0,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 349, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_pos=IntSlider(min=0,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 352, "name": "IntSlider", "kind": "ref", "category": "function", "info": " s=IntSlider(min=0, max=99, value=74),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 353, "name": "IntSlider", "kind": "ref", "category": "function", "info": " l=IntSlider(min=0, max=99, value=50), # noqa: E741\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 354, "name": "IntSlider", "kind": "ref", "category": "function", "info": " sep=IntSlider(min=1, max=50, value=10),\n"}, 
{"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 359, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 360, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 361, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 363, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 364, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 371, "name": "choose_cubehelix_palette", "kind": "def", "category": "function", "info": "def choose_cubehelix_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to create a sequential cubehelix palette.\n\n This corresponds with the :func:`cubehelix_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values. 
The cubehelix system allows the\n palette to have more hue variance across the range, which can be helpful\n for distinguishing a wider range of values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 401, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 404, "name": "choose_cubehelix", "kind": "def", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 404, "name": "IntSlider", "kind": "ref", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 405, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " start=FloatSlider(min=0, max=3, value=0),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 406, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " rot=FloatSlider(min=-1, max=1, value=.4),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 407, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " gamma=FloatSlider(min=0, max=5, value=1),\n"}, {"fname": 
"playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 408, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " hue=FloatSlider(min=0, max=1, value=.8),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 409, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " light=FloatSlider(min=0, max=1, value=.85),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 410, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " dark=FloatSlider(min=0, max=1, value=.15),\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 414, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = cubehelix_palette(256, start, rot, gamma,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 416, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 417, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 419, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n"}, {"fname": "playground/a13ce8f0-7ca5-4959-a176-4f15f1385e2d/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 421, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}] \ No newline at end of file diff --git a/tags_mwaskom__seaborn-3407.json b/tags_mwaskom__seaborn-3407.json new file mode 100644 index 0000000000000000000000000000000000000000..74f08721beee712467de43f21cbbe44df711c842 --- /dev/null +++ b/tags_mwaskom__seaborn-3407.json @@ -0,0 +1 @@ +[{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 11, "name": "rglob", "kind": "ref", "category": "function", "info": "py_files = path.rglob(\"*.py\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 12, "name": "rglob", "kind": "ref", "category": "function", "info": "ipynb_files = path.rglob(\"*.ipynb\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 18, "name": "read", "kind": "ref", "category": "function", "info": " datasets += re.findall(r\"load_dataset\\(['\\\"](\\w+)['\\\"]\", fid.read())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 22, "name": "read", "kind": "ref", "category": "function", "info": " datasets += re.findall(r\"load_dataset\\(\\\\['\\\"](\\w+)\\\\['\\\"]\", fid.read())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/ci/cache_datasets.py", "rel_fname": "ci/cache_datasets.py", "line": 26, "name": "load_dataset", "kind": "ref", 
"category": "function", "info": " load_dataset(name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/ci/check_gallery.py", "rel_fname": "ci/check_gallery.py", "line": 12, "name": "read", "kind": "ref", "category": "function", "info": " exec(fid.read())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 18, "name": "abspath", "kind": "ref", "category": "function", "info": "sys.path.insert(0, os.path.abspath('sphinxext'))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/conf.py", "rel_fname": "doc/conf.py", "line": 125, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(path):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 21, "name": "execfile", "kind": "def", "category": "function", "info": "def execfile(filename, globals=None, locals=None):\n with open(filename, \"rb\") as fp:\n exec(compile(fp.read(), filename, 'exec'), globals, locals)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 127, "name": "create_thumbnail", "kind": "def", "category": "function", "info": "def create_thumbnail(infile, thumbfile,\n width=275, height=275,\n cx=0.5, cy=0.5, border=4):\n baseout, extout = op.splitext(thumbfile)\n\n im = matplotlib.image.imread(infile)\n rows, cols = im.shape[:2]\n x0 = int(cx * cols - .5 * width)\n y0 = int(cy * rows - .5 * height)\n xslice = slice(x0, x0 + width)\n yslice = slice(y0, y0 + height)\n thumb = im[yslice, xslice]\n thumb[:border, :, :3] = thumb[-border:, :, :3] = 0\n thumb[:, :border, :3] = thumb[:, -border:, :3] = 0\n\n dpi = 100\n fig = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)\n\n ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n frameon=False, xticks=[], yticks=[])\n if all(thumb.shape):\n ax.imshow(thumb, aspect='auto', resample=True,\n interpolation='bilinear')\n else:\n warnings.warn(\n f\"Bad thumbnail crop. 
{thumbfile} will be empty.\"\n )\n fig.savefig(thumbfile, dpi=dpi)\n return fig\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 145, "name": "add_axes", "kind": "ref", "category": "function", "info": " ax = fig.add_axes([0, 0, 1, 1], aspect='auto',\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 158, "name": "indent", "kind": "def", "category": "function", "info": "def indent(s, N=4):\n \"\"\"indent a string\"\"\"\n return s.replace('\\n', '\\n' + N * ' ')\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 163, "name": "ExampleGenerator", "kind": "def", "category": "class", "info": "__init__\tdirname\tfname\tmodulename\tpyfilename\trstfilename\thtmlfilename\tpngfilename\tthumbfilename\tsphinxtag\tpagetitle\tplotfunc\tcomponents\textract_docstring\texec_file\ttoctree_entry\tcontents_entry"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 169, "name": "extract_docstring", "kind": "ref", "category": "function", "info": " self.extract_docstring()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 179, "name": "exec_file", "kind": "ref", "category": "function", "info": " self.exec_file()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 188, "name": "fname", "kind": "def", "category": "function", "info": " def fname(self):\n return op.split(self.filename)[1]\n\n @property\n def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = 
tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 192, "name": "modulename", "kind": "def", "category": "function", "info": " def modulename(self):\n return op.splitext(self.fname)[0]\n\n @property\n def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, 
tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 196, "name": "pyfilename", "kind": "def", "category": "function", "info": " def pyfilename(self):\n return self.modulename + '.py'\n\n @property\n def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n 
continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 200, "name": "rstfilename", "kind": "def", "category": "function", "info": " def rstfilename(self):\n return self.modulename + \".rst\"\n\n @property\n def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in 
docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 204, "name": "htmlfilename", "kind": "def", "category": "function", "info": " def htmlfilename(self):\n return self.modulename + '.html'\n\n @property\n def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), 
float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 208, "name": "pngfilename", "kind": "def", "category": "function", "info": " def pngfilename(self):\n pngfile = self.modulename + '.png'\n return \"_images/\" + pngfile\n\n @property\n def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n 
my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 213, "name": "thumbfilename", "kind": "def", "category": "function", "info": " def thumbfilename(self):\n pngfile = self.modulename + '_thumb.png'\n return pngfile\n\n @property\n def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" 
./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 218, "name": "sphinxtag", "kind": "def", "category": "function", "info": " def sphinxtag(self):\n return self.modulename\n\n @property\n def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 222, "name": "pagetitle", "kind": "def", "category": "function", "info": " def pagetitle(self):\n return self.docstring.strip().split('\\n')[0].strip()\n\n @property\n def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 226, "name": "plotfunc", "kind": "def", "category": "function", "info": " def plotfunc(self):\n match = re.search(r\"sns\\.(.+plot)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+map)\\(\", self.filetext)\n if match:\n return match.group(1)\n match = re.search(r\"sns\\.(.+Grid)\\(\", self.filetext)\n if match:\n return match.group(1)\n return \"\"\n\n @property\n def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 239, "name": "components", "kind": "def", "category": "function", "info": " def components(self):\n\n objects = re.findall(r\"sns\\.(\\w+)\\(\", self.filetext)\n\n refs = []\n for obj in objects:\n if obj[0].isupper():\n refs.append(f\":class:`{obj}`\")\n else:\n refs.append(f\":func:`{obj}`\")\n return \", \".join(refs)\n\n def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 251, "name": "extract_docstring", "kind": "def", "category": "function", "info": " def extract_docstring(self):\n \"\"\" Extract a module-level docstring\n \"\"\"\n lines = open(self.filename).readlines()\n start_row = 0\n if lines[0].startswith('#!'):\n lines.pop(0)\n start_row = 1\n\n docstring = ''\n first_par = ''\n line_iter = lines.__iter__()\n tokens = tokenize.generate_tokens(lambda: next(line_iter))\n for tok_type, tok_content, _, (erow, _), _ in tokens:\n tok_type = token.tok_name[tok_type]\n if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):\n continue\n elif tok_type == 'STRING':\n docstring = eval(tok_content)\n # If the docstring is formatted with several paragraphs,\n # extract the first one:\n paragraphs = '\\n'.join(line.rstrip()\n for line in docstring.split('\\n')\n ).split('\\n\\n')\n if len(paragraphs) > 0:\n first_par = paragraphs[0]\n break\n\n thumbloc = None\n for i, line in enumerate(docstring.split(\"\\n\")):\n m = re.match(r\"^_thumb: (\\.\\d+),\\s*(\\.\\d+)\", line)\n if m:\n thumbloc = float(m.group(1)), float(m.group(2))\n break\n if thumbloc is not None:\n self.thumbloc = thumbloc\n docstring = \"\\n\".join([l for l in docstring.split(\"\\n\")\n if not l.startswith(\"_thumb\")])\n\n self.docstring = docstring\n self.short_desc = first_par\n self.end_line = erow + 1 + start_row\n\n def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 254, "name": "readlines", "kind": "ref", "category": "function", "info": " lines = open(self.filename).readlines()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 294, "name": "exec_file", "kind": "def", "category": "function", "info": " def exec_file(self):\n print(f\"running {self.filename}\")\n\n plt.close('all')\n my_globals = {'pl': plt,\n 'plt': plt}\n execfile(self.filename, my_globals)\n\n fig = plt.gcf()\n fig.canvas.draw()\n pngfile = op.join(self.target_dir, self.pngfilename)\n thumbfile = op.join(\"example_thumbs\", self.thumbfilename)\n self.html = f\"\"\n fig.savefig(pngfile, dpi=75, bbox_inches=\"tight\")\n\n cx, cy = self.thumbloc\n create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n\n def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. 
raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 300, "name": "execfile", "kind": "ref", "category": "function", "info": " execfile(self.filename, my_globals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 310, "name": "create_thumbnail", "kind": "ref", "category": "function", "info": " create_thumbnail(pngfile, thumbfile, cx=cx, cy=cy)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 312, "name": "toctree_entry", "kind": "def", "category": "function", "info": " def toctree_entry(self):\n return f\" ./{op.splitext(self.htmlfilename)[0]}\\n\\n\"\n\n def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 315, "name": "contents_entry", "kind": "def", "category": "function", "info": " def contents_entry(self):\n return (\".. raw:: html\\n\\n\"\n \" \\n\\n\"\n \"\\n\\n\"\n \"\".format(self.htmlfilename,\n self.thumbfilename,\n self.plotfunc))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 362, "name": "ExampleGenerator", "kind": "ref", "category": "function", "info": " ex = ExampleGenerator(filename, target_dir)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 377, "name": "toctree_entry", "kind": "ref", "category": "function", "info": " toctree += ex.toctree_entry()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 378, "name": "contents_entry", "kind": "ref", "category": "function", "info": " contents += ex.contents_entry()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/gallery_generator.py", "rel_fname": "doc/sphinxext/gallery_generator.py", "line": 391, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect('builder-inited', main)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 48, "name": "main", "kind": "def", "category": "function", "info": "def main(app):\n\n content_yaml = Path(app.builder.srcdir) / \"tutorial.yaml\"\n tutorial_rst = Path(app.builder.srcdir) / \"tutorial.rst\"\n\n tutorial_dir = Path(app.builder.srcdir) / \"tutorial\"\n tutorial_dir.mkdir(exist_ok=True)\n\n with open(content_yaml) as fid:\n sections = yaml.load(fid, yaml.BaseLoader)\n\n for section in sections:\n title = section[\"title\"]\n section[\"header\"] = \"\\n\".join([title, \"-\" * len(title)]) if title else \"\"\n\n env = Environment().from_string(TEMPLATE)\n content = env.render(sections=sections)\n\n with 
open(tutorial_rst, \"w\") as fid:\n fid.write(content)\n\n for section in sections:\n for page in section[\"pages\"]:\n if (\n not (svg_path := tutorial_dir / f\"{page}.svg\").exists()\n or svg_path.stat().st_mtime < Path(__file__).stat().st_mtime\n ):\n write_thumbnail(svg_path, page)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 64, "name": "render", "kind": "ref", "category": "function", "info": " content = env.render(sections=sections)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 67, "name": "write", "kind": "ref", "category": "function", "info": " fid.write(content)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 75, "name": "write_thumbnail", "kind": "ref", "category": "function", "info": " write_thumbnail(svg_path, page)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 78, "name": "write_thumbnail", "kind": "def", "category": "function", "info": "def write_thumbnail(svg_path, page):\n\n with (\n sns.axes_style(\"dark\"),\n sns.plotting_context(\"notebook\"),\n sns.color_palette(\"deep\")\n ):\n fig = globals()[page]()\n for ax in fig.axes:\n ax.set(xticklabels=[], yticklabels=[], xlabel=\"\", ylabel=\"\", title=\"\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n fig.tight_layout()\n fig.savefig(svg_path, format=\"svg\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 81, "name": "axes_style", "kind": "ref", "category": "function", "info": " sns.axes_style(\"dark\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 82, "name": "plotting_context", "kind": "ref", "category": "function", "info": " sns.plotting_context(\"notebook\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 83, "name": "color_palette", "kind": "ref", "category": "function", "info": " sns.color_palette(\"deep\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 90, "name": "tight_layout", "kind": "ref", "category": "function", "info": " fig.tight_layout()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 91, "name": "savefig", "kind": "ref", "category": "function", "info": " fig.savefig(svg_path, format=\"svg\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 94, "name": "introduction", "kind": "def", "category": "function", "info": "def introduction():\n\n tips = sns.load_dataset(\"tips\")\n fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n penguins = sns.load_dataset(\"penguins\")\n\n f = 
mpl.figure.Figure(figsize=(5, 5))\n with sns.axes_style(\"whitegrid\"):\n f.subplots(2, 2)\n\n sns.scatterplot(\n tips, x=\"total_bill\", y=\"tip\", hue=\"sex\", size=\"size\",\n alpha=.75, palette=[\"C0\", \".5\"], legend=False, ax=f.axes[0],\n )\n sns.kdeplot(\n tips.query(\"size != 5\"), x=\"total_bill\", hue=\"size\",\n palette=\"blend:C0,.5\", fill=True, linewidth=.5,\n legend=False, common_norm=False, ax=f.axes[1],\n )\n sns.lineplot(\n fmri, x=\"timepoint\", y=\"signal\", hue=\"event\",\n errorbar=(\"se\", 2), legend=False, palette=[\"C0\", \".5\"], ax=f.axes[2],\n )\n sns.boxplot(\n penguins, x=\"bill_depth_mm\", y=\"species\", hue=\"sex\",\n whiskerprops=dict(linewidth=1.5), medianprops=dict(linewidth=1.5),\n boxprops=dict(linewidth=1.5), capprops=dict(linewidth=0),\n width=.5, palette=[\"C0\", \".8\"], whis=5, ax=f.axes[3],\n )\n f.axes[3].legend_ = None\n for ax in f.axes:\n ax.set(xticks=[], yticks=[])\n return f\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 96, "name": "load_dataset", "kind": "ref", "category": "function", "info": " tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 97, "name": "load_dataset", "kind": "ref", "category": "function", "info": " fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 97, "name": "query", "kind": "ref", "category": "function", "info": " fmri = sns.load_dataset(\"fmri\").query(\"region == 'parietal'\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 98, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 100, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 101, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 102, "name": "subplots", "kind": "ref", "category": "function", "info": " f.subplots(2, 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 104, "name": "scatterplot", "kind": "ref", "category": "function", "info": " sns.scatterplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 108, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", 
"rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 109, "name": "query", "kind": "ref", "category": "function", "info": " tips.query(\"size != 5\"), x=\"total_bill\", hue=\"size\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 113, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 117, "name": "boxplot", "kind": "ref", "category": "function", "info": " sns.boxplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 129, "name": "function_overview", "kind": "def", "category": "function", "info": "def function_overview():\n\n from matplotlib.patches import FancyBboxPatch\n\n f = mpl.figure.Figure(figsize=(7, 5))\n with sns.axes_style(\"white\"):\n ax = f.subplots()\n f.subplots_adjust(0, 0, 1, 1)\n ax.set_axis_off()\n ax.set(xlim=(0, 1), ylim=(0, 1))\n\n deep = sns.color_palette(\"deep\")\n colors = dict(relational=deep[0], distributions=deep[1], categorical=deep[2])\n dark = sns.color_palette(\"dark\")\n text_colors = dict(relational=dark[0], distributions=dark[1], categorical=dark[2])\n\n functions = dict(\n relational=[\"scatterplot\", \"lineplot\"],\n distributions=[\"histplot\", \"kdeplot\", \"ecdfplot\", \"rugplot\"],\n categorical=[\n \"stripplot\", \"swarmplot\", \"boxplot\", \"violinplot\", \"pointplot\", \"barplot\"\n ],\n )\n pad, w, h = .06, .2, .15\n xs, y = np.arange(0, 1, 1 / 3) + pad * 1.05, .7\n for x, mod in zip(xs, functions):\n color = colors[mod] + (.2,)\n text_color = text_colors[mod]\n ax.add_artist(FancyBboxPatch((x, y), w, h, f\"round,pad={pad}\", color=\"white\"))\n ax.add_artist(FancyBboxPatch(\n (x, y), w, h, f\"round,pad={pad}\",\n linewidth=1, edgecolor=text_color, facecolor=color,\n ))\n ax.text(\n x + w / 2, y + h / 2, f\"{mod[:3]}plot\\n({mod})\",\n ha=\"center\", va=\"center\", size=20, color=text_color\n )\n for i, func in enumerate(functions[mod]):\n x_i, y_i = x + w / 2, y - i * .1 - h / 2 - pad\n xy = x_i - w / 2, y_i - pad / 3\n ax.add_artist(\n FancyBboxPatch(xy, w, h / 4, f\"round,pad={pad / 3}\", color=\"white\")\n )\n ax.add_artist(FancyBboxPatch(\n xy, w, h / 4, f\"round,pad={pad / 3}\",\n linewidth=1, edgecolor=text_color, facecolor=color\n ))\n ax.text(x_i, y_i, func, ha=\"center\", va=\"center\", size=16, color=text_color)\n ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)\n return f\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 133, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(7, 5))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 134, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 135, "name": "subplots", "kind": "ref", "category": "function", "info": " ax = f.subplots()\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 136, "name": "subplots_adjust", "kind": "ref", "category": "function", "info": " f.subplots_adjust(0, 0, 1, 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 137, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 140, "name": "color_palette", "kind": "ref", "category": "function", "info": " deep = sns.color_palette(\"deep\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 142, "name": "color_palette", "kind": "ref", "category": "function", "info": " dark = sns.color_palette(\"dark\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 157, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch((x, y), w, h, f\"round,pad={pad}\", color=\"white\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 158, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 162, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 169, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 172, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(FancyBboxPatch(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 176, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(x_i, y_i, func, ha=\"center\", va=\"center\", size=16, color=text_color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 177, "name": "plot", "kind": "ref", "category": "function", "info": " ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 181, "name": "data_structure", "kind": "def", "category": "function", "info": "def data_structure():\n\n f = mpl.figure.Figure(figsize=(7, 5))\n gs = mpl.gridspec.GridSpec(\n figure=f, ncols=6, nrows=2, height_ratios=(1, 20),\n left=0, right=.35, bottom=0, top=.9, wspace=.1, hspace=.01\n )\n colors = [c + (.5,) 
for c in sns.color_palette(\"deep\")]\n f.add_subplot(gs[0, :], facecolor=\".8\")\n for i in range(gs.ncols):\n f.add_subplot(gs[1:, i], facecolor=colors[i])\n\n gs = mpl.gridspec.GridSpec(\n figure=f, ncols=2, nrows=2, height_ratios=(1, 8), width_ratios=(1, 11),\n left=.4, right=1, bottom=.2, top=.8, wspace=.015, hspace=.02\n )\n f.add_subplot(gs[0, 1:], facecolor=colors[2])\n f.add_subplot(gs[1:, 0], facecolor=colors[1])\n f.add_subplot(gs[1, 1], facecolor=colors[0])\n return f\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 183, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(7, 5))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 184, "name": "GridSpec", "kind": "ref", "category": "function", "info": " gs = mpl.gridspec.GridSpec(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 188, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = [c + (.5,) for c in sns.color_palette(\"deep\")]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 189, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[0, :], facecolor=\".8\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 191, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1:, i], facecolor=colors[i])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 193, "name": "GridSpec", "kind": "ref", "category": "function", "info": " gs = mpl.gridspec.GridSpec(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 197, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[0, 1:], facecolor=colors[2])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 198, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1:, 0], facecolor=colors[1])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 199, "name": "add_subplot", "kind": "ref", "category": "function", "info": " f.add_subplot(gs[1, 1], facecolor=colors[0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 203, "name": "error_bars", "kind": "def", "category": "function", "info": "def error_bars():\n\n diamonds = sns.load_dataset(\"diamonds\")\n with sns.axes_style(\"whitegrid\"):\n g = sns.catplot(\n diamonds, x=\"carat\", y=\"clarity\", hue=\"clarity\", kind=\"point\",\n errorbar=(\"sd\", .5), join=False, legend=False, facet_kws={\"despine\": 
False},\n palette=\"ch:s=-.2,r=-.2,d=.4,l=.6_r\", scale=.75, capsize=.3,\n )\n g.ax.yaxis.set_inverted(False)\n return g.figure\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 205, "name": "load_dataset", "kind": "ref", "category": "function", "info": " diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 206, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 207, "name": "catplot", "kind": "ref", "category": "function", "info": " g = sns.catplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 212, "name": "set_inverted", "kind": "ref", "category": "function", "info": " g.ax.yaxis.set_inverted(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 218, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 223, "name": "Plot", "kind": "ref", "category": "function", "info": " p = so.Plot(x, y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 226, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps), color=map(str, x)),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 227, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\", pointsize=ps), alpha=x),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 228, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".9\", pointsize=ps, edgewidth=2), edgecolor=x),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 229, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\"), pointsize=x).scale(pointsize=(4, 18)),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 229, "name": "scale", "kind": "ref", "category": "function", "info": " p.add(so.Dot(color=\".3\"), pointsize=x).scale(pointsize=(4, 18)),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 230, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".9\", edgecolor=\".2\"), edgewidth=x),\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 231, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".3\"), marker=map(str, x)),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 232, "name": "Dot", "kind": "ref", "category": "function", "info": " p.add(so.Dot(pointsize=ps, color=\".3\", marker=\"x\"), stroke=x),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 235, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 236, "name": "subplots", "kind": "ref", "category": "function", "info": " axs = f.subplots(len(plots))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 238, "name": "on", "kind": "ref", "category": "function", "info": " p.on(ax).plot()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 238, "name": "plot", "kind": "ref", "category": "function", "info": " p.on(ax).plot()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 240, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=ax, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 245, "name": "objects_interface", "kind": "def", "category": "function", "info": "def objects_interface():\n\n f = mpl.figure.Figure(figsize=(5, 4))\n C = sns.color_palette(\"deep\")\n ax = f.subplots()\n fontsize = 22\n rects = [((.135, .50), .69), ((.275, .38), .26), ((.59, .38), .40)]\n for i, (xy, w) in enumerate(rects):\n ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n ax.text(0, .52, \"Plot(data, 'x', 'y', color='var1')\", size=fontsize, color=\".2\")\n ax.text(0, .40, \".add(Dot(alpha=.5), marker='var2')\", size=fontsize, color=\".2\")\n annots = [\n (\"Mapped\\nin all layers\", (.48, .62), (0, 55)),\n (\"Set directly\", (.41, .35), (0, -55)),\n (\"Mapped\\nin this layer\", (.80, .35), (0, -55)),\n ]\n for i, (text, xy, xytext) in enumerate(annots):\n ax.annotate(\n text, xy, xytext,\n textcoords=\"offset points\", fontsize=18, ha=\"center\", va=\"center\",\n arrowprops=dict(arrowstyle=\"->\", linewidth=1.5, color=C[i]), color=C[i],\n )\n ax.set_axis_off()\n f.subplots_adjust(0, 0, 1, 1)\n\n return f\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 247, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 4))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": 
"doc/sphinxext/tutorial_builder.py", "line": 248, "name": "color_palette", "kind": "ref", "category": "function", "info": " C = sns.color_palette(\"deep\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 249, "name": "subplots", "kind": "ref", "category": "function", "info": " ax = f.subplots()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 253, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 253, "name": "Rectangle", "kind": "ref", "category": "function", "info": " ax.add_artist(mpl.patches.Rectangle(xy, w, .09, color=C[i], alpha=.2, lw=0))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 254, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(0, .52, \"Plot(data, 'x', 'y', color='var1')\", size=fontsize, color=\".2\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 255, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(0, .40, \".add(Dot(alpha=.5), marker='var2')\", size=fontsize, color=\".2\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 262, "name": "annotate", "kind": "ref", "category": "function", "info": " ax.annotate(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 267, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 268, "name": "subplots_adjust", "kind": "ref", "category": "function", "info": " f.subplots_adjust(0, 0, 1, 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 273, "name": "relational", "kind": "def", "category": "function", "info": "def relational():\n\n mpg = sns.load_dataset(\"mpg\")\n with sns.axes_style(\"ticks\"):\n g = sns.relplot(\n data=mpg, x=\"horsepower\", y=\"mpg\", size=\"displacement\", hue=\"weight\",\n sizes=(50, 500), hue_norm=(2000, 4500), alpha=.75, legend=False,\n palette=\"ch:start=-.5,rot=.7,dark=.3,light=.7_r\",\n )\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 275, "name": "load_dataset", "kind": "ref", "category": "function", "info": " mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 276, "name": 
"axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 277, "name": "relplot", "kind": "ref", "category": "function", "info": " g = sns.relplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 282, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 286, "name": "distributions", "kind": "def", "category": "function", "info": "def distributions():\n\n penguins = sns.load_dataset(\"penguins\").dropna()\n with sns.axes_style(\"white\"):\n g = sns.displot(\n penguins, x=\"flipper_length_mm\", row=\"island\",\n binwidth=4, kde=True, line_kws=dict(linewidth=2), legend=False,\n )\n sns.despine(left=True)\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 288, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 288, "name": "dropna", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 289, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 290, "name": "displot", "kind": "ref", "category": "function", "info": " g = sns.displot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 294, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 295, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 299, "name": "categorical", "kind": "def", "category": "function", "info": "def categorical():\n\n penguins = sns.load_dataset(\"penguins\").dropna()\n with sns.axes_style(\"whitegrid\"):\n g = sns.catplot(\n penguins, x=\"sex\", y=\"body_mass_g\", hue=\"island\", col=\"sex\",\n kind=\"box\", whis=np.inf, legend=False, sharex=False,\n )\n sns.despine(left=True)\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", 
"rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 301, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 301, "name": "dropna", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 302, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"whitegrid\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 303, "name": "catplot", "kind": "ref", "category": "function", "info": " g = sns.catplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 307, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 308, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 312, "name": "regression", "kind": "def", "category": "function", "info": "def regression():\n\n anscombe = sns.load_dataset(\"anscombe\")\n with sns.axes_style(\"white\"):\n g = sns.lmplot(\n anscombe, x=\"x\", y=\"y\", hue=\"dataset\", col=\"dataset\", col_wrap=2,\n scatter_kws=dict(edgecolor=\".2\", facecolor=\".7\", s=80),\n line_kws=dict(lw=4), ci=None,\n )\n g.set(xlim=(2, None), ylim=(2, None))\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 314, "name": "load_dataset", "kind": "ref", "category": "function", "info": " anscombe = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 315, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"white\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 316, "name": "lmplot", "kind": "ref", "category": "function", "info": " g = sns.lmplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 322, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 326, "name": "axis_grids", "kind": "def", "category": "function", "info": "def axis_grids():\n\n penguins = 
sns.load_dataset(\"penguins\").sample(200, random_state=0)\n with sns.axes_style(\"ticks\"):\n g = sns.pairplot(\n penguins.drop(\"flipper_length_mm\", axis=1),\n diag_kind=\"kde\", diag_kws=dict(fill=False),\n plot_kws=dict(s=40, fc=\"none\", ec=\"C0\", alpha=.75, linewidth=.75),\n )\n g.figure.set_size_inches(5, 5)\n return g.figure\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 328, "name": "load_dataset", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").sample(200, random_state=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 328, "name": "sample", "kind": "ref", "category": "function", "info": " penguins = sns.load_dataset(\"penguins\").sample(200, random_state=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 329, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(\"ticks\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 330, "name": "pairplot", "kind": "ref", "category": "function", "info": " g = sns.pairplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 331, "name": "drop", "kind": "ref", "category": "function", "info": " penguins.drop(\"flipper_length_mm\", axis=1),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 335, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " g.figure.set_size_inches(5, 5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 339, "name": "aesthetics", "kind": "def", "category": "function", "info": "def aesthetics():\n\n f = mpl.figure.Figure(figsize=(5, 5))\n for i, style in enumerate([\"darkgrid\", \"white\", \"ticks\", \"whitegrid\"], 1):\n with sns.axes_style(style):\n ax = f.add_subplot(2, 2, i)\n ax.set(xticks=[0, .25, .5, .75, 1], yticks=[0, .25, .5, .75, 1])\n sns.despine(ax=f.axes[1])\n sns.despine(ax=f.axes[2])\n return f\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 341, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 343, "name": "axes_style", "kind": "ref", "category": "function", "info": " with sns.axes_style(style):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 344, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax = f.add_subplot(2, 2, i)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 346, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=f.axes[1])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 347, "name": "despine", "kind": "ref", "category": "function", "info": " sns.despine(ax=f.axes[2])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 351, "name": "color_palettes", "kind": "def", "category": "function", "info": "def color_palettes():\n\n f = mpl.figure.Figure(figsize=(5, 5))\n palettes = [\"deep\", \"husl\", \"gray\", \"ch:\", \"mako\", \"vlag\", \"icefire\"]\n axs = f.subplots(len(palettes))\n x = np.arange(10)\n for ax, name in zip(axs, palettes):\n cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n ax.pcolormesh(x[None, :], linewidth=.5, edgecolor=\"w\", alpha=.8, cmap=cmap)\n ax.set_axis_off()\n return f\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 353, "name": "Figure", "kind": "ref", "category": "function", "info": " f = mpl.figure.Figure(figsize=(5, 5))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 355, "name": "subplots", "kind": "ref", "category": "function", "info": " axs = f.subplots(len(palettes))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 358, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 358, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(sns.color_palette(name, x.size))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 359, "name": "pcolormesh", "kind": "ref", "category": "function", "info": " ax.pcolormesh(x[None, :], linewidth=.5, edgecolor=\"w\", alpha=.8, cmap=cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 360, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 364, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.connect(\"builder-inited\", main)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/sphinxext/tutorial_builder.py", "rel_fname": "doc/sphinxext/tutorial_builder.py", "line": 365, "name": "connect", "kind": "ref", "category": "function", "info": " app.connect(\"builder-inited\", main)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 9, "name": "line_type", "kind": "def", "category": "function", "info": "def line_type(line):\n\n if line.startswith(\" \"):\n return \"code\"\n else:\n return \"markdown\"\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 17, "name": "add_cell", "kind": "def", "category": "function", "info": "def add_cell(nb, lines, cell_type):\n\n cell_objs = {\n \"code\": nbformat.v4.new_code_cell,\n \"markdown\": nbformat.v4.new_markdown_cell,\n }\n text = \"\\n\".join(lines)\n cell = cell_objs[cell_type](text)\n nb[\"cells\"].append(cell)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 36, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " lines = NumpyDocString(pydoc.getdoc(obj))[\"Examples\"]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 41, "name": "new_notebook", "kind": "ref", "category": "function", "info": " nb = nbformat.v4.new_notebook()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 57, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) != cell_type:\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 60, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 61, "name": "line_type", "kind": "ref", "category": "function", "info": " cell_type = line_type(line)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 64, "name": "line_type", "kind": "ref", "category": "function", "info": " if line_type(line) == \"code\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 70, "name": "add_cell", "kind": "ref", "category": "function", "info": " add_cell(nb, cell, cell_type)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/extract_examples.py", "rel_fname": "doc/tools/extract_examples.py", "line": 72, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f\"docstrings/{name}.ipynb\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 14, "name": "poisson_disc_sample", "kind": "def", "category": "function", "info": "def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):\n \"\"\"Find positions using poisson-disc sampling.\"\"\"\n # See http://bost.ocks.org/mike/algorithms/\n rng = np.random.default_rng(seed)\n uniform = rng.uniform\n randint = rng.integers\n\n # Cache the results\n key = array_radius, pad_radius, seed\n if key in XY_CACHE:\n return 
XY_CACHE[key]\n\n # Start at a fixed point we know will work\n start = np.zeros(d)\n samples = [start]\n queue = [start]\n\n while queue:\n\n # Pick a sample to expand from\n s_idx = randint(len(queue))\n s = queue[s_idx]\n\n for i in range(candidates):\n # Generate a candidate from this sample\n coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n\n # Check the three conditions to accept the candidate\n in_array = np.sqrt(np.sum(coords ** 2)) < array_radius\n in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)\n\n if in_array and in_ring:\n # Accept the candidate\n samples.append(coords)\n queue.append(coords)\n break\n\n if (i + 1) == candidates:\n # We've exhausted the particular sample\n queue.pop(s_idx)\n\n samples = np.array(samples)\n XY_CACHE[key] = samples\n return samples\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 17, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 34, "name": "randint", "kind": "ref", "category": "function", "info": " s_idx = randint(len(queue))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 39, "name": "uniform", "kind": "ref", "category": "function", "info": " coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 60, "name": "logo", "kind": "def", "category": "function", "info": "def logo(\n ax,\n color_kws, ring, ring_idx, edge,\n pdf_means, pdf_sigma, dy, y0, w, h,\n hist_mean, hist_sigma, hist_y0, lw, skip,\n scatter, pad, scale,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 70, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 71, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect('equal')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 79, "name": "gaussian", "kind": "ref", "category": "function", "info": " y = gaussian(x.size, pdf_sigma)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 97, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 104, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(bg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 115, "name": "add_artist", "kind": "ref", "category": "function", "info": " 
ax.add_artist(wedge)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 120, "name": "gaussian", "kind": "ref", "category": "function", "info": " hist_y = gaussian(x.size, hist_sigma)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 133, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " bar.set_clip_path(fg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 138, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " u.set_clip_path(fg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 143, "name": "poisson_disc_sample", "kind": "ref", "category": "function", "info": " xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 153, "name": "get_paths", "kind": "ref", "category": "function", "info": " path = u.get_paths()[0]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 154, "name": "get_transform", "kind": "ref", "category": "function", "info": " points.set_clip_path(path, transform=u.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 155, "name": "set_visible", "kind": "ref", "category": "function", "info": " u.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 182, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " color = sns.cubehelix_palette(**kwargs[\"color_kws\"])[color_idx]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 187, "name": "logo", "kind": "ref", "category": "function", "info": " logo(ax, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 194, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 204, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 212, "name": "logo", "kind": "ref", "category": "function", "info": " logo(axs[0], **kwargs)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/generate_logos.py", "rel_fname": "doc/tools/generate_logos.py", "line": 222, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " axs[1].set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 40, "name": "MetadataError", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 44, "name": "pop_recursive", "kind": "def", "category": "function", "info": "def pop_recursive(d, key, default=None):\n \"\"\"dict.pop(key) where `key` is a `.`-delimited list of nested keys.\n >>> d = {'a': {'b': 1, 'c': 2}}\n >>> pop_recursive(d, 'a.c')\n 2\n >>> d\n {'a': {'b': 1}}\n \"\"\"\n nested = key.split('.')\n current = d\n for k in nested[:-1]:\n if hasattr(current, 'get'):\n current = current.get(k, {})\n else:\n return default\n if not hasattr(current, 'pop'):\n return default\n return current.pop(nested[-1], default)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 64, "name": "strip_output", "kind": "def", "category": "function", "info": "def strip_output(nb):\n \"\"\"\n Strip the outputs, execution count/prompt number and miscellaneous\n metadata from a notebook object, unless specified to keep either the\n outputs or counts.\n \"\"\"\n keys = {'metadata': [], 'cell': {'metadata': [\"execution\"]}}\n\n nb.metadata.pop('signature', None)\n nb.metadata.pop('widgets', None)\n\n for field in keys['metadata']:\n pop_recursive(nb.metadata, field)\n\n if 'NB_KERNEL' in os.environ:\n nb.metadata['kernelspec']['name'] = os.environ['NB_KERNEL']\n nb.metadata['kernelspec']['display_name'] = os.environ['NB_KERNEL']\n\n for cell in nb.cells:\n\n if 'outputs' in cell:\n cell['outputs'] = []\n if 'prompt_number' in cell:\n cell['prompt_number'] = None\n if 'execution_count' in cell:\n cell['execution_count'] = None\n\n # Always remove this metadata\n for output_style in ['collapsed', 'scrolled']:\n if output_style in cell.metadata:\n cell.metadata[output_style] = False\n if 'metadata' in cell:\n for field in ['collapsed', 'scrolled', 'ExecuteTime']:\n cell.metadata.pop(field, None)\n for (extra, fields) in keys['cell'].items():\n if extra in cell:\n for field in fields:\n pop_recursive(getattr(cell, extra), field)\n return nb\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 76, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(nb.metadata, field)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 101, "name": "pop_recursive", "kind": "ref", "category": "function", "info": " pop_recursive(getattr(cell, extra), field)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 120, "name": "ExecutePreprocessor", "kind": "ref", "category": "function", "info": " ep = ExecutePreprocessor(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 125, "name": "preprocess", "kind": "ref", "category": "function", 
"info": " ep.preprocess(nb, {\"metadata\": {\"path\": basedir}})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 142, "name": "RSTExporter", "kind": "ref", "category": "function", "info": " exp = RSTExporter()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 151, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 151, "name": "TagRemovePreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(TagRemovePreprocessor(config=c), True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "register_preprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 152, "name": "ExtractOutputPreprocessor", "kind": "ref", "category": "function", "info": " exp.register_preprocessor(ExtractOutputPreprocessor(config=c), True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 154, "name": "from_notebook_node", "kind": "ref", "category": "function", "info": " body, resources = exp.from_notebook_node(nb)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 157, "name": "strip_output", "kind": "ref", "category": "function", "info": " nb = strip_output(nb)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/nb_to_doc.py", "rel_fname": "doc/tools/nb_to_doc.py", "line": 168, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(imdir):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 15, "name": "read", "kind": "ref", "category": "function", "info": " nb = nbformat.read(f, as_version=4)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/doc/tools/set_nb_kernels.py", "rel_fname": "doc/tools/set_nb_kernels.py", "line": 21, "name": "write", "kind": "ref", "category": "function", "info": " nbformat.write(nb, f)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"anscombe\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/anscombes_quartet.py", "rel_fname": "examples/anscombes_quartet.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "sns.lmplot(\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 12, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f, left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/different_scatter_variables.py", "rel_fname": "examples/different_scatter_variables.py", "line": 19, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=\"carat\", y=\"price\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "fmri = sns.load_dataset(\"fmri\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/errorband_lineplots.py", "rel_fname": "examples/errorband_lineplots.py", "line": 14, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(x=\"timepoint\", y=\"signal\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_histogram.py", "rel_fname": "examples/faceted_histogram.py", "line": 10, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "dots = sns.load_dataset(\"dots\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 13, "name": "color_palette", "kind": "ref", "category": "function", "info": "palette = 
sns.color_palette(\"rocket_r\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/faceted_lineplot.py", "rel_fname": "examples/faceted_lineplot.py", "line": 16, "name": "relplot", "kind": "ref", "category": "function", "info": "sns.relplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 12, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 18, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"\", \"Body mass (g)\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_barplot.py", "rel_fname": "examples/grouped_barplot.py", "line": 19, "name": "set_title", "kind": "ref", "category": "function", "info": "g.legend.set_title(\"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\", palette=\"pastel\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 14, "name": "boxplot", "kind": "ref", "category": "function", "info": "sns.boxplot(x=\"day\", y=\"total_bill\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_boxplot.py", "rel_fname": "examples/grouped_boxplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(offset=10, trim=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_violinplots.py", "rel_fname": 
"examples/grouped_violinplots.py", "line": 13, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=tips, x=\"day\", y=\"total_bill\", hue=\"smoker\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/grouped_violinplots.py", "rel_fname": "examples/grouped_violinplots.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 12, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 16, "name": "get_level_values", "kind": "ref", "category": "function", "info": " .get_level_values(\"network\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 17, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 18, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 24, "name": "corr", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 24, "name": "stack", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 24, "name": "reset_index", "kind": "ref", "category": "function", "info": "corr_mat = df.corr().stack().reset_index(name=\"correlation\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 27, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 36, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 37, "name": "margins", "kind": "ref", "category": "function", "info": "g.ax.margins(.02)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 38, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": "for label in 
g.ax.get_xticklabels():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 39, "name": "set_rotation", "kind": "ref", "category": "function", "info": " label.set_rotation(90)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 40, "name": "get_legend_handles", "kind": "ref", "category": "function", "info": "for artist in get_legend_handles(g.legend):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/heat_scatter.py", "rel_fname": "examples/heat_scatter.py", "line": 41, "name": "set_edgecolor", "kind": "ref", "category": "function", "info": " artist.set_edgecolor(\".7\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(11)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 11, "name": "gamma", "kind": "ref", "category": "function", "info": "x = rs.gamma(2, size=1000)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = -.5 * x + rs.normal(size=1000)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/hexbin_marginals.py", "rel_fname": "examples/hexbin_marginals.py", "line": 14, "name": "jointplot", "kind": "ref", "category": "function", "info": "sns.jointplot(x=x, y=y, kind=\"hex\", color=\"#4CB391\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 16, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(f)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 18, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/histogram_stacked.py", "rel_fname": "examples/histogram_stacked.py", "line": 27, "name": "set_major_formatter", "kind": "ref", "category": "function", "info": "ax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/histogram_stacked.py", 
"rel_fname": "examples/histogram_stacked.py", "line": 28, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks([500, 1000, 2000, 5000, 10000])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 13, "name": "set_xscale", "kind": "ref", "category": "function", "info": "ax.set_xscale(\"log\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 16, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 23, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(x=\"distance\", y=\"method\", data=planets,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/horizontal_boxplot.py", "rel_fname": "examples/horizontal_boxplot.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(trim=True, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 17, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 20, "name": "stripplot", "kind": "ref", "category": "function", "info": "sns.stripplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 28, "name": "pointplot", "kind": "ref", "category": "function", "info": "sns.pointplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/jitter_stripplot.py", "rel_fname": "examples/jitter_stripplot.py", "line": 35, "name": "move_legend", "kind": "ref", "category": "function", "info": "sns.move_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = 
sns.load_dataset(\"planets\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 12, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=planets, x=\"year\", y=\"distance\", marginal_ticks=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 18, "name": "add_axes", "kind": "ref", "category": "function", "info": "cax = g.figure.add_axes([.15, .55, .02, .2])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 21, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_histogram.py", "rel_fname": "examples/joint_histogram.py", "line": 25, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, element=\"step\", color=\"#03012d\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/joint_kde.py", "rel_fname": "examples/joint_kde.py", "line": 13, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", rc={\"axes.facecolor\": (0, 0, 0, 0)})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 13, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(1979)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 14, "name": "randn", "kind": "ref", "category": "function", "info": "x = rs.randn(500)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 21, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "pal = sns.cubehelix_palette(10, rot=-.25, light=.7)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 22, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, row=\"g\", hue=\"g\", aspect=15, height=.5, palette=pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 31, "name": "refline", "kind": "ref", "category": "function", "info": "g.refline(y=0, linewidth=2, linestyle=\"-\", color=None, 
clip_on=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 35, "name": "label", "kind": "def", "category": "function", "info": "def label(x, color, label):\n ax = plt.gca()\n ax.text(0, .2, label, fontweight=\"bold\", color=color,\n ha=\"left\", va=\"center\", transform=ax.transAxes)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 47, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/kde_ridgeplot.py", "rel_fname": "examples/kde_ridgeplot.py", "line": 49, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(bottom=True, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/large_distributions.py", "rel_fname": "examples/large_distributions.py", "line": 11, "name": "boxenplot", "kind": "ref", "category": "function", "info": "sns.boxenplot(x=\"clarity\", y=\"carat\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rng = np.random.RandomState(0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 16, "name": "multivariate_normal", "kind": "ref", "category": "function", "info": "x, y = rng.multivariate_normal(mean, cov, n).T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 20, "name": "scatterplot", "kind": "ref", "category": "function", "info": "sns.scatterplot(x=x, y=y, s=5, color=\".15\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 21, "name": "histplot", "kind": "ref", "category": "function", "info": "sns.histplot(x=x, y=y, bins=50, pthresh=.1, cmap=\"mako\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/layered_bivariate_plot.py", "rel_fname": "examples/layered_bivariate_plot.py", "line": 22, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(x=x, y=y, levels=5, color=\"w\", linewidths=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/logistic_regression.py", 
"rel_fname": "examples/logistic_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/logistic_regression.py", "rel_fname": "examples/logistic_regression.py", "line": 16, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(x=\"age\", y=\"survived\", col=\"sex\", hue=\"sex\", data=df,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(4)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 16, "name": "randint", "kind": "ref", "category": "function", "info": "pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 24, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "grid = sns.FacetGrid(df, col=\"walk\", hue=\"walk\", palette=\"tab20c\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_facets.py", "rel_fname": "examples/many_facets.py", "line": 28, "name": "refline", "kind": "ref", "category": "function", "info": "grid.refline(y=0, linestyle=\":\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 12, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 15, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(33)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 16, "name": "normal", "kind": "ref", "category": "function", "info": "d = pd.DataFrame(data=rs.normal(size=(100, 26)),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 20, "name": "corr", "kind": "ref", "category": "function", "info": "corr = d.corr()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 29, "name": "diverging_palette", "kind": "ref", "category": "function", "info": "cmap = sns.diverging_palette(230, 20, as_cmap=True)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/many_pairwise_correlations.py", "rel_fname": "examples/many_pairwise_correlations.py", "line": 32, "name": "heatmap", "kind": "ref", "category": "function", "info": "sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", color_codes=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=mpg, x=\"mpg\", y=\"acceleration\", space=0, ratio=17)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.scatterplot, size=mpg[\"horsepower\"], sizes=(30, 120),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/marginal_ticks.py", "rel_fname": "examples/marginal_ticks.py", "line": 14, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.rugplot, height=1, color=\"g\", alpha=.6)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "iris = sns.load_dataset(\"iris\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 14, "name": "set_aspect", "kind": "ref", "category": "function", "info": "ax.set_aspect(\"equal\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 17, "name": "kdeplot", "kind": "ref", "category": "function", "info": "sns.kdeplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_bivariate_kde.py", "rel_fname": "examples/multiple_bivariate_kde.py", "line": 18, "name": "query", "kind": "ref", "category": "function", "info": " data=iris.query(\"species != 'versicolor'\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 10, "name": "load_dataset", 
"kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_conditional_kde.py", "rel_fname": "examples/multiple_conditional_kde.py", "line": 13, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 8, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 11, "name": "blend_palette", "kind": "ref", "category": "function", "info": "cmap = sns.blend_palette(colors, input=\"husl\", as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_ecdf.py", "rel_fname": "examples/multiple_ecdf.py", "line": 12, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "penguins = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 13, "name": "lmplot", "kind": "ref", "category": "function", "info": "g = sns.lmplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/multiple_regression.py", "rel_fname": "examples/multiple_regression.py", "line": 20, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": "g.set_axis_labels(\"Snoot length (mm)\", \"Snoot depth (mm)\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 11, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(df, diag_sharey=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 12, "name": "map_upper", "kind": "ref", "category": "function", "info": "g.map_upper(sns.scatterplot, s=15)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 13, "name": "map_lower", "kind": "ref", "category": "function", "info": "g.map_lower(sns.kdeplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pair_grid_with_kde.py", "rel_fname": "examples/pair_grid_with_kde.py", "line": 14, "name": "map_diag", "kind": "ref", "category": "function", "info": "g.map_diag(sns.kdeplot, lw=2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 6, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "titanic = sns.load_dataset(\"titanic\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 12, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(titanic, y_vars=\"survived\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/paired_pointplots.py", "rel_fname": "examples/paired_pointplots.py", "line": 19, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(fig=g.fig, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "PairGrid", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "g = sns.PairGrid(crashes.sort_values(\"total\", ascending=False),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 34, "name": "grid", "kind": "ref", "category": "function", "info": " ax.xaxis.grid(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 35, "name": "grid", "kind": "ref", "category": "function", "info": " ax.yaxis.grid(True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pairgrid_dotplot.py", "rel_fname": "examples/pairgrid_dotplot.py", "line": 37, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\", context=\"talk\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 9, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(8)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y1, palette=\"rocket\", ax=ax1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 19, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax1.set_ylabel(\"Sequential\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 23, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y2, palette=\"vlag\", ax=ax2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 25, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax2.set_ylabel(\"Diverging\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 28, "name": "choice", "kind": "ref", "category": "function", "info": "y3 = rs.choice(y1, len(y1), replace=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 29, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=x, y=y3, palette=\"deep\", ax=ax3)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 31, "name": "set_ylabel", "kind": "ref", "category": "function", "info": "ax3.set_ylabel(\"Qualitative\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_choices.py", "rel_fname": "examples/palette_choices.py", "line": 34, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 11, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(50)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 20, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " cmap = sns.cubehelix_palette(start=s, light=1, as_cmap=True)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 23, "name": "normal", "kind": "ref", "category": "function", "info": " x, y = rs.normal(size=(2, 50))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 24, "name": "kdeplot", "kind": "ref", "category": "function", "info": " sns.kdeplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/palette_generation.py", "rel_fname": "examples/palette_generation.py", "line": 31, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "load_dataset", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 13, "name": "sort_values", "kind": "ref", "category": "function", "info": "crashes = sns.load_dataset(\"car_crashes\").sort_values(\"total\", ascending=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 16, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"pastel\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 17, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"total\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 21, "name": "set_color_codes", "kind": "ref", "category": "function", "info": "sns.set_color_codes(\"muted\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 22, "name": "barplot", "kind": "ref", "category": "function", "info": "sns.barplot(x=\"alcohol\", y=\"abbrev\", data=crashes,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/part_whole_bars.py", "rel_fname": "examples/part_whole_bars.py", "line": 29, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "exercise = sns.load_dataset(\"exercise\")\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 13, "name": "catplot", "kind": "ref", "category": "function", "info": "g = sns.catplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/pointplot_anova.py", "rel_fname": "examples/pointplot_anova.py", "line": 18, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 11, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/radial_facets.py", "rel_fname": "examples/radial_facets.py", "line": 21, "name": "FacetGrid", "kind": "ref", "category": "function", "info": "g = sns.FacetGrid(df, col=\"speed\", hue=\"speed\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"darkgrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "tips = sns.load_dataset(\"tips\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/regression_marginals.py", "rel_fname": "examples/regression_marginals.py", "line": 10, "name": "jointplot", "kind": "ref", "category": "function", "info": "g = sns.jointplot(x=\"total_bill\", y=\"tip\", data=tips,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 10, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(7)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 11, "name": "normal", "kind": "ref", "category": "function", "info": "x = rs.normal(2, 1, 75)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 12, "name": "normal", "kind": "ref", "category": "function", "info": "y = 2 + 1.5 * x + rs.normal(0, 2, 75)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/residplot.py", "rel_fname": "examples/residplot.py", "line": 15, "name": "residplot", "kind": "ref", "category": "function", "info": "sns.residplot(x=x, y=y, lowess=True, color=\"g\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", 
"info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatter_bubbles.py", "rel_fname": "examples/scatter_bubbles.py", "line": 14, "name": "relplot", "kind": "ref", "category": "function", "info": "sns.relplot(x=\"horsepower\", y=\"mpg\", hue=\"origin\", size=\"weight\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\", palette=\"muted\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_categorical.py", "rel_fname": "examples/scatterplot_categorical.py", "line": 14, "name": "swarmplot", "kind": "ref", "category": "function", "info": "ax = sns.swarmplot(data=df, x=\"body_mass_g\", y=\"sex\", hue=\"species\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"ticks\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_matrix.py", "rel_fname": "examples/scatterplot_matrix.py", "line": 10, "name": "pairplot", "kind": "ref", "category": "function", "info": "sns.pairplot(df, hue=\"species\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "planets = sns.load_dataset(\"planets\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 13, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": "cmap = sns.cubehelix_palette(rot=-.2, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 14, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 21, "name": "grid", "kind": "ref", "category": "function", "info": "g.ax.xaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 22, 
"name": "grid", "kind": "ref", "category": "function", "info": "g.ax.yaxis.grid(True, \"minor\", linewidth=.25)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/scatterplot_sizes.py", "rel_fname": "examples/scatterplot_sizes.py", "line": 23, "name": "despine", "kind": "ref", "category": "function", "info": "g.despine(left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 11, "name": "default_rng", "kind": "ref", "category": "function", "info": "rs = np.random.default_rng(0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 13, "name": "normal", "kind": "ref", "category": "function", "info": "d = rs.normal(0, 2, (n, p))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/simple_violinplots.py", "rel_fname": "examples/simple_violinplots.py", "line": 17, "name": "violinplot", "kind": "ref", "category": "function", "info": "sns.violinplot(data=d, palette=\"light:g\", inner=\"points\", orient=\"h\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"white\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"penguins\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 11, "name": "JointGrid", "kind": "ref", "category": "function", "info": "g = sns.JointGrid(data=df, x=\"body_mass_g\", y=\"bill_depth_mm\", space=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 12, "name": "plot_joint", "kind": "ref", "category": "function", "info": "g.plot_joint(sns.kdeplot,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/smooth_bivariate_kde.py", "rel_fname": "examples/smooth_bivariate_kde.py", "line": 15, "name": "plot_marginals", "kind": "ref", "category": "function", "info": "g.plot_marginals(sns.histplot, color=\"#03051A\", alpha=1, bins=25)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights_long = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": 
"examples/spreadsheet_heatmap.py", "line": 13, "name": "pivot", "kind": "ref", "category": "function", "info": " .pivot(index=\"month\", columns=\"year\", values=\"passengers\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/spreadsheet_heatmap.py", "rel_fname": "examples/spreadsheet_heatmap.py", "line": 18, "name": "heatmap", "kind": "ref", "category": "function", "info": "sns.heatmap(flights, annot=True, fmt=\"d\", linewidths=.5, ax=ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 7, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 9, "name": "load_dataset", "kind": "ref", "category": "function", "info": "mpg = sns.load_dataset(\"mpg\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 10, "name": "catplot", "kind": "ref", "category": "function", "info": "sns.catplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/strip_regplot.py", "rel_fname": "examples/strip_regplot.py", "line": 14, "name": "regplot", "kind": "ref", "category": "function", "info": "sns.regplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 21, "name": "husl_palette", "kind": "ref", "category": "function", "info": "network_pal = sns.husl_palette(8, s=.45)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 25, "name": "get_level_values", "kind": "ref", "category": "function", "info": "networks = df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "clustermap", "kind": 
"ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/structured_heatmap.py", "rel_fname": "examples/structured_heatmap.py", "line": 29, "name": "corr", "kind": "ref", "category": "function", "info": "g = sns.clustermap(df.corr(), center=0, cmap=\"vlag\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "diamonds = sns.load_dataset(\"diamonds\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/three_variable_histogram.py", "rel_fname": "examples/three_variable_histogram.py", "line": 11, "name": "displot", "kind": "ref", "category": "function", "info": "sns.displot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 9, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"dark\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 10, "name": "load_dataset", "kind": "ref", "category": "function", "info": "flights = sns.load_dataset(\"flights\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 13, "name": "relplot", "kind": "ref", "category": "function", "info": "g = sns.relplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 24, "name": "text", "kind": "ref", "category": "function", "info": " ax.text(.8, .85, year, transform=ax.transAxes, fontweight=\"bold\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 27, "name": "lineplot", "kind": "ref", "category": "function", "info": " sns.lineplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "set_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 33, "name": "get_xticks", "kind": "ref", "category": "function", "info": "ax.set_xticks(ax.get_xticks()[::2])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 36, "name": "set_titles", "kind": "ref", "category": "function", "info": "g.set_titles(\"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 37, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": 
"g.set_axis_labels(\"\", \"Passengers\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/timeseries_facets.py", "rel_fname": "examples/timeseries_facets.py", "line": 38, "name": "tight_layout", "kind": "ref", "category": "function", "info": "g.tight_layout()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 10, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 12, "name": "RandomState", "kind": "ref", "category": "function", "info": "rs = np.random.RandomState(365)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 13, "name": "randn", "kind": "ref", "category": "function", "info": "values = rs.randn(365, 4).cumsum(axis=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 16, "name": "rolling", "kind": "ref", "category": "function", "info": "data = data.rolling(7).mean()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_data_lineplot.py", "rel_fname": "examples/wide_data_lineplot.py", "line": 18, "name": "lineplot", "kind": "ref", "category": "function", "info": "sns.lineplot(data=data, palette=\"tab10\", linewidth=2.5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 8, "name": "set_theme", "kind": "ref", "category": "function", "info": "sns.set_theme(style=\"whitegrid\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 11, "name": "load_dataset", "kind": "ref", "category": "function", "info": "df = sns.load_dataset(\"brain_networks\", header=[0, 1, 2], index_col=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 15, "name": "get_level_values", "kind": "ref", "category": "function", "info": "used_columns = (df.columns.get_level_values(\"network\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 16, "name": "astype", "kind": "ref", "category": "function", "info": " .astype(int)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 17, "name": "isin", "kind": "ref", "category": "function", "info": " .isin(used_networks))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "corr", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "groupby", "kind": "ref", "category": "function", 
"info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 21, "name": "mean", "kind": "ref", "category": "function", "info": "corr_df = df.corr().groupby(level=\"network\").mean()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 22, "name": "astype", "kind": "ref", "category": "function", "info": "corr_df.index = corr_df.index.astype(int)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 23, "name": "sort_index", "kind": "ref", "category": "function", "info": "corr_df = corr_df.sort_index().T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/examples/wide_form_violinplot.py", "rel_fname": "examples/wide_form_violinplot.py", "line": 33, "name": "despine", "kind": "ref", "category": "function", "info": "sns.despine(left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 5, "name": "MarkerStyle", "kind": "def", "category": "function", "info": "def MarkerStyle(marker=None, fillstyle=None):\n \"\"\"\n Allow MarkerStyle to accept a MarkerStyle object as parameter.\n\n Supports matplotlib < 3.3.0\n https://github.com/matplotlib/matplotlib/pull/16692\n\n \"\"\"\n if isinstance(marker, mpl.markers.MarkerStyle):\n if fillstyle is None:\n return marker\n else:\n marker = marker.get_marker()\n return mpl.markers.MarkerStyle(marker, fillstyle)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 17, "name": "get_marker", "kind": "ref", "category": "function", "info": " marker = marker.get_marker()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 18, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return mpl.markers.MarkerStyle(marker, fillstyle)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 21, "name": "norm_from_scale", "kind": "def", "category": "function", "info": "def norm_from_scale(scale, norm):\n \"\"\"Produce a Normalize object given a Scale and min/max domain limits.\"\"\"\n # This is an internal maplotlib function that simplifies things to access\n # It is likely to become part of the matplotlib API at some point:\n # https://github.com/matplotlib/matplotlib/issues/20329\n if isinstance(norm, mpl.colors.Normalize):\n return norm\n\n if scale is None:\n return None\n\n if norm is None:\n vmin = vmax = None\n else:\n vmin, vmax = norm # TODO more helpful error if this fails?\n\n class ScaledNorm(mpl.colors.Normalize):\n\n def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, 
self.vmin, self.vmax)\n # ***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 37, "name": "ScaledNorm", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 39, "name": "__call__", "kind": "def", "category": "function", "info": " def __call__(self, value, clip=None):\n # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py\n # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE\n value, is_scalar = self.process_value(value)\n self.autoscale_None(value)\n if self.vmin > self.vmax:\n raise ValueError(\"vmin must be less or equal to vmax\")\n if self.vmin == self.vmax:\n return np.full_like(value, 0)\n if clip is None:\n clip = self.clip\n if clip:\n value = np.clip(value, self.vmin, self.vmax)\n # ***** Seaborn changes start ****\n t_value = self.transform(value).reshape(np.shape(value))\n t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n # ***** Seaborn changes end *****\n if not np.isfinite([t_vmin, t_vmax]).all():\n raise ValueError(\"Invalid vmin or vmax\")\n t_value -= t_vmin\n t_value /= (t_vmax - t_vmin)\n t_value = np.ma.masked_invalid(t_value, copy=False)\n return t_value[0] if is_scalar else t_value\n\n new_norm = ScaledNorm(vmin, vmax)\n new_norm.transform = scale.get_transform().transform\n\n return new_norm\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 42, "name": "process_value", "kind": "ref", "category": "function", "info": " value, is_scalar = self.process_value(value)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 43, "name": "autoscale_None", "kind": "ref", "category": "function", "info": " self.autoscale_None(value)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 53, "name": "transform", "kind": "ref", "category": "function", "info": " t_value = self.transform(value).reshape(np.shape(value))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 54, "name": "transform", "kind": "ref", "category": "function", "info": " t_vmin, t_vmax = self.transform([self.vmin, self.vmax])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 60, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " t_value = np.ma.masked_invalid(t_value, copy=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 63, "name": "ScaledNorm", "kind": "ref", "category": "function", "info": " 
new_norm = ScaledNorm(vmin, vmax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 64, "name": "get_transform", "kind": "ref", "category": "function", "info": " new_norm.transform = scale.get_transform().transform\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 69, "name": "scale_factory", "kind": "def", "category": "function", "info": "def scale_factory(scale, axis, **kwargs):\n \"\"\"\n Backwards compatability for creation of independent scales.\n\n Matplotlib scales require an Axis object for instantiation on < 3.4.\n But the axis is not used, aside from extraction of the axis_name in LogScale.\n\n \"\"\"\n modify_transform = False\n if _version_predates(mpl, \"3.4\"):\n if axis[0] in \"xy\":\n modify_transform = True\n axis = axis[0]\n base = kwargs.pop(\"base\", None)\n if base is not None:\n kwargs[f\"base{axis}\"] = base\n nonpos = kwargs.pop(\"nonpositive\", None)\n if nonpos is not None:\n kwargs[f\"nonpos{axis}\"] = nonpos\n\n if isinstance(scale, str):\n class Axis:\n axis_name = axis\n axis = Axis()\n\n scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n\n if modify_transform:\n transform = scale.get_transform()\n transform.base = kwargs.get(\"base\", 10)\n if kwargs.get(\"nonpositive\") == \"mask\":\n # Setting a private attribute, but we only get here\n # on an old matplotlib, so this won't break going forwards\n transform._clip = False\n\n return scale\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 78, "name": "_version_predates", "kind": "ref", "category": "function", "info": " if _version_predates(mpl, \"3.4\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 90, "name": "Axis", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 92, "name": "Axis", "kind": "ref", "category": "function", "info": " axis = Axis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 94, "name": "scale_factory", "kind": "ref", "category": "function", "info": " scale = mpl.scale.scale_factory(scale, axis, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 97, "name": "get_transform", "kind": "ref", "category": "function", "info": " transform = scale.get_transform()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 107, "name": "set_scale_obj", "kind": "def", "category": "function", "info": "def set_scale_obj(ax, axis, scale):\n \"\"\"Handle backwards compatability with setting matplotlib scale.\"\"\"\n if _version_predates(mpl, \"3.4\"):\n # The ability to pass a BaseScale instance to Axes.set_{}scale was added\n # to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089\n # Workaround: use the scale name, which is restrictive only if the user\n # wants to define a custom scale; they'll need to update the registry too.\n if scale.name is None:\n # Hack to support our custom Formatter-less CatScale\n return\n method = getattr(ax, 
f\"set_{axis}scale\")\n kws = {}\n if scale.name == \"function\":\n trans = scale.get_transform()\n kws[\"functions\"] = (trans._forward, trans._inverse)\n method(scale.name, **kws)\n axis_obj = getattr(ax, f\"{axis}axis\")\n scale.set_default_locators_and_formatters(axis_obj)\n else:\n ax.set(**{f\"{axis}scale\": scale})\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 109, "name": "_version_predates", "kind": "ref", "category": "function", "info": " if _version_predates(mpl, \"3.4\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 120, "name": "get_transform", "kind": "ref", "category": "function", "info": " trans = scale.get_transform()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 122, "name": "method", "kind": "ref", "category": "function", "info": " method(scale.name, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 124, "name": "set_default_locators_and_formatters", "kind": "ref", "category": "function", "info": " scale.set_default_locators_and_formatters(axis_obj)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 129, "name": "get_colormap", "kind": "def", "category": "function", "info": "def get_colormap(name):\n \"\"\"Handle changes to matplotlib colormap interface in 3.6.\"\"\"\n try:\n return mpl.colormaps[name]\n except AttributeError:\n return mpl.cm.get_cmap(name)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 134, "name": "get_cmap", "kind": "ref", "category": "function", "info": " return mpl.cm.get_cmap(name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 137, "name": "register_colormap", "kind": "def", "category": "function", "info": "def register_colormap(name, cmap):\n \"\"\"Handle changes to matplotlib colormap interface in 3.6.\"\"\"\n try:\n if name not in mpl.colormaps:\n mpl.colormaps.register(cmap, name=name)\n except AttributeError:\n mpl.cm.register_cmap(name, cmap)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 141, "name": "register", "kind": "ref", "category": "function", "info": " mpl.colormaps.register(cmap, name=name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 143, "name": "register_cmap", "kind": "ref", "category": "function", "info": " mpl.cm.register_cmap(name, cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 146, "name": "set_layout_engine", "kind": "def", "category": "function", "info": "def set_layout_engine(fig, engine):\n \"\"\"Handle changes to auto layout engine interface in 3.6\"\"\"\n if hasattr(fig, \"set_layout_engine\"):\n fig.set_layout_engine(engine)\n else:\n # _version_predates(mpl, 3.6)\n if engine == \"tight\":\n fig.set_tight_layout(True)\n elif engine == \"constrained\":\n fig.set_constrained_layout(True)\n elif engine == \"none\":\n 
fig.set_tight_layout(False)\n fig.set_constrained_layout(False)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 149, "name": "set_layout_engine", "kind": "ref", "category": "function", "info": " fig.set_layout_engine(engine)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 153, "name": "set_tight_layout", "kind": "ref", "category": "function", "info": " fig.set_tight_layout(True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 155, "name": "set_constrained_layout", "kind": "ref", "category": "function", "info": " fig.set_constrained_layout(True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 157, "name": "set_tight_layout", "kind": "ref", "category": "function", "info": " fig.set_tight_layout(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 158, "name": "set_constrained_layout", "kind": "ref", "category": "function", "info": " fig.set_constrained_layout(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 161, "name": "share_axis", "kind": "def", "category": "function", "info": "def share_axis(ax0, ax1, which):\n \"\"\"Handle changes to post-hoc axis sharing.\"\"\"\n if _version_predates(mpl, \"3.5\"):\n group = getattr(ax0, f\"get_shared_{which}_axes\")()\n group.join(ax1, ax0)\n else:\n getattr(ax1, f\"share{which}\")(ax0)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 163, "name": "_version_predates", "kind": "ref", "category": "function", "info": " if _version_predates(mpl, \"3.5\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 170, "name": "get_legend_handles", "kind": "def", "category": "function", "info": "def get_legend_handles(legend):\n \"\"\"Handle legendHandles attribute rename.\"\"\"\n if _version_predates(mpl, \"3.7\"):\n return legend.legendHandles\n else:\n return legend.legend_handles\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_compat.py", "rel_fname": "seaborn/_compat.py", "line": 172, "name": "_version_predates", "kind": "ref", "category": "function", "info": " if _version_predates(mpl, \"3.7\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 14, "name": "PlotData", "kind": "def", "category": "class", "info": "__init__\t__contains__\tjoin\t_assign_variables"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 54, "name": "_assign_variables", "kind": "ref", "category": "function", "info": " frame, names, ids = self._assign_variables(data, variables)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 90, "name": "PlotData", "kind": "ref", "category": "function", "info": " new = PlotData(data, variables)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 118, "name": "_assign_variables", "kind": "def", "category": "function", "info": " def _assign_variables(\n self,\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataFrame, dict[str, str | None], dict[str, str | int]]:\n \"\"\"\n Assign values for plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data\n Input data where variable names map to vector values.\n variables\n Keys are names of plot variables (x, y, ...) each value is one of:\n\n - name of a column (or index level, or dictionary entry) in `data`\n - vector in any format that can construct a :class:`pandas.DataFrame`\n\n Returns\n -------\n frame\n Table mapping seaborn variables (x, y, color, ...) to data vectors.\n names\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n ids\n Like the `names` dict, but `None` values are replaced by the `id()`\n of the data object that defined the variable.\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in `data`, or when they are\n non-indexed vector datatypes that have a different length from `data`.\n\n \"\"\"\n source_data: Mapping | DataFrame\n frame: DataFrame\n names: dict[str, str | None]\n ids: dict[str, str | int]\n\n plot_data = {}\n names = {}\n ids = {}\n\n given_data = data is not None\n if data is not None:\n source_data = data\n else:\n # Data is optional; all variables can be defined as vectors\n # But simplify downstream code by always having a usable source data object\n source_data = {}\n\n # TODO Generally interested in accepting a generic DataFrame interface\n # Track https://data-apis.org/ for development\n\n # Variables can also be extracted from the index of a DataFrame\n if isinstance(source_data, pd.DataFrame):\n index = source_data.index.to_frame().to_dict(\"series\")\n else:\n index = {}\n\n for key, val in variables.items():\n\n # Simply ignore variables with no specification\n if val is None:\n continue\n\n # Try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow other hashables when\n # taking from the main data object. Allow only strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n\n # TODO this will be rendered unnecessary by the following pandas fix:\n # https://github.com/pandas-dev/pandas/pull/41283\n try:\n hash(val)\n val_is_hashable = True\n except TypeError:\n val_is_hashable = False\n\n val_as_data_key = (\n # See https://github.com/pandas-dev/pandas/pull/41283\n # (isinstance(val, abc.Hashable) and val in source_data)\n (val_is_hashable and val in source_data)\n or (isinstance(val, str) and val in index)\n )\n\n if val_as_data_key:\n val = cast(ColumnName, val)\n if val in source_data:\n plot_data[key] = source_data[val]\n elif val in index:\n plot_data[key] = index[val]\n names[key] = ids[key] = str(val)\n\n elif isinstance(val, str):\n\n # This looks like a column name but, lookup failed.\n\n err = f\"Could not interpret value `{val}` for `{key}`. 
\"\n if not given_data:\n err += \"Value is a string, but `data` was not passed.\"\n else:\n err += \"An entry with this name does not appear in `data`.\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value somehow represents data\n\n # Ignore empty data structures\n if isinstance(val, Sized) and len(val) == 0:\n continue\n\n # If vector has no index, it must match length of data table\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if isinstance(val, Sized) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the original name using pandas-like metadata\n if hasattr(val, \"name\"):\n names[key] = ids[key] = str(val.name) # type: ignore # mypy/1424\n else:\n names[key] = None\n ids[key] = id(val)\n\n # Construct a tidy plot DataFrame. This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n # TODO Note: this fails when variable specs *only* have scalars!\n frame = pd.DataFrame(plot_data)\n\n return frame, names, ids\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/data.py", "rel_fname": "seaborn/_core/data.py", "line": 176, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = source_data.index.to_frame().to_dict(\"series\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/exceptions.py", "rel_fname": "seaborn/_core/exceptions.py", "line": 9, "name": "PlotSpecError", "kind": "def", "category": "class", "info": "_during"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/exceptions.py", "rel_fname": "seaborn/_core/exceptions.py", "line": 21, "name": "_during", "kind": "def", "category": "function", "info": " def _during(cls, step: str, var: str = \"\") -> PlotSpecError:\n \"\"\"\n Initialize the class to report the failure of a specific operation.\n \"\"\"\n message = []\n if var:\n message.append(f\"{step} failed for the `{var}` variable.\")\n else:\n message.append(f\"{step} failed.\")\n message.append(\"See the traceback above for more information.\")\n return cls(\" \".join(message))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/exceptions.py", "rel_fname": "seaborn/_core/exceptions.py", "line": 31, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(\" \".join(message))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 15, "name": "GroupBy", "kind": "def", "category": "class", "info": "__init__\t_get_groups\t_reorder_columns\tagg\tapply"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 48, "name": "_get_groups", "kind": "def", "category": "function", "info": " def _get_groups(\n self, data: DataFrame\n ) -> tuple[str | list[str], Index | MultiIndex]:\n \"\"\"Return index with Cartesian product of ordered grouping variable levels.\"\"\"\n levels = {}\n for var, order in self.order.items():\n if var in data:\n if order is None:\n order = categorical_order(data[var])\n levels[var] = order\n\n grouper: str | list[str]\n groups: Index | MultiIndex\n if 
not levels:\n grouper = []\n groups = pd.Index([])\n elif len(levels) > 1:\n grouper = list(levels)\n groups = pd.MultiIndex.from_product(levels.values(), names=grouper)\n else:\n grouper, = list(levels)\n groups = pd.Index(levels[grouper], name=grouper)\n return grouper, groups\n\n def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n # Implies that we had a MultiIndex so key is iterable\n group_ids = dict(zip(grouper, cast(Iterable, key)))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 56, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(data[var])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 72, "name": "_reorder_columns", "kind": "def", "category": "function", "info": " def _reorder_columns(self, res, data):\n \"\"\"Reorder result columns to match original order with new columns appended.\"\"\"\n cols = [c for c in data if c in res]\n cols += [c for c in res if c not in data]\n return res.reindex(columns=pd.Index(cols))\n\n def agg(self, data: DataFrame, *args, **kwargs) -> DataFrame:\n \"\"\"\n Reduce each group to a single row in the output.\n\n The output will have a row for each unique combination of the grouping\n variable levels with null values for the aggregated variable(s) where\n those combinations do not appear in the dataset.\n\n \"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n # We will need to see whether there are valid usecases that end up here\n raise ValueError(\"No grouping variables are present in dataframe\")\n\n res = (\n data\n .groupby(grouper, sort=False, observed=True)\n .agg(*args, **kwargs)\n .reindex(groups)\n .reset_index()\n .pipe(self._reorder_columns, data)\n )\n\n return res\n\n def apply(\n self, 
data: DataFrame, func: Callable[..., DataFrame],\n *args, **kwargs,\n ) -> DataFrame:\n \"\"\"Apply a DataFrame -> DataFrame mapping to each group.\"\"\"\n grouper, groups = self._get_groups(data)\n\n if not grouper:\n return self._reorder_columns(func(data, *args, **kwargs), data)\n\n parts = {}\n for key, part_df in data.groupby(grouper, sort=False):\n parts[key] = func(part_df, *args, **kwargs)\n stack = []\n for key in groups:\n if key in parts:\n if isinstance(grouper, list):\n # Implies that we had a MultiIndex so key is iterable\n group_ids = dict(zip(grouper, cast(Iterable, key)))\n else:\n group_ids = {grouper: key}\n stack.append(parts[key].assign(**group_ids))\n\n res = pd.concat(stack, ignore_index=True)\n return self._reorder_columns(res, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 87, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 109, "name": "_get_groups", "kind": "ref", "category": "function", "info": " grouper, groups = self._get_groups(data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 112, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 112, "name": "func", "kind": "ref", "category": "function", "info": " return self._reorder_columns(func(data, *args, **kwargs), data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 116, "name": "func", "kind": "ref", "category": "function", "info": " parts[key] = func(part_df, *args, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/groupby.py", "rel_fname": "seaborn/_core/groupby.py", "line": 128, "name": "_reorder_columns", "kind": "ref", "category": "function", "info": " return self._reorder_columns(res, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 11, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 15, "name": "Move", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 27, "name": "Jitter", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 57, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(self.seed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 59, "name": "jitter", "kind": "def", "category": "function", "info": " def 
jitter(data, col, scale):\n noise = rng.uniform(-.5, +.5, len(data))\n offsets = noise * scale\n return data[col] + offsets\n\n if self.width is default:\n width = 0.0 if self.x or self.y else 0.2\n else:\n width = cast(float, self.width)\n\n if self.width:\n data[orient] = jitter(data, orient, width * data[\"width\"])\n if self.x:\n data[\"x\"] = jitter(data, \"x\", self.x)\n if self.y:\n data[\"y\"] = jitter(data, \"y\", self.y)\n\n return data\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 60, "name": "uniform", "kind": "ref", "category": "function", "info": " noise = rng.uniform(-.5, +.5, len(data))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 70, "name": "jitter", "kind": "ref", "category": "function", "info": " data[orient] = jitter(data, orient, width * data[\"width\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 72, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"x\"] = jitter(data, \"x\", self.x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 74, "name": "jitter", "kind": "ref", "category": "function", "info": " data[\"y\"] = jitter(data, \"y\", self.y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 80, "name": "Dodge", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 114, "name": "groupby_pos", "kind": "def", "category": "function", "info": " def groupby_pos(s):\n grouper = [groups[v] for v in [orient, \"col\", \"row\"] if v in data]\n return s.groupby(grouper, sort=False, observed=True)\n\n def scale_widths(w):\n # TODO what value to fill missing widths??? Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 118, "name": "scale_widths", "kind": "def", "category": "function", "info": " def scale_widths(w):\n # TODO what value to fill missing widths??? 
Hard problem...\n # TODO short circuit this if outer widths has no variance?\n empty = 0 if self.empty == \"fill\" else w.mean()\n filled = w.fillna(empty)\n scale = filled.max()\n norm = filled.sum()\n if self.empty == \"keep\":\n w = filled\n return w / norm * scale\n\n def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 129, "name": "widths_to_offsets", "kind": "def", "category": "function", "info": " def widths_to_offsets(w):\n return w.shift(1).fillna(0).cumsum() + (w - w.sum()) / 2\n\n new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n\n if self.gap:\n new_widths *= 1 - self.gap\n\n groups[\"_dodged\"] = groups[orient] + offsets\n groups[\"width\"] = new_widths\n\n out = (\n data\n .drop(\"width\", axis=1)\n .merge(groups, on=grouping_vars, how=\"left\")\n .drop(orient, axis=1)\n .rename(columns={\"_dodged\": orient})\n )\n\n return out\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 132, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " new_widths = groupby_pos(groups[\"width\"]).transform(scale_widths)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 133, "name": "groupby_pos", "kind": "ref", "category": "function", "info": " offsets = groupby_pos(new_widths).transform(widths_to_offsets)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 153, "name": "Stack", "kind": "def", "category": "class", "info": "_stack\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 164, "name": "_stack", "kind": "def", "category": "function", "info": " def _stack(self, df, orient):\n\n # TODO should stack do something with ymin/ymax style marks?\n # Should there be an upstream conversion to baseline/height parameterization?\n\n if df[\"baseline\"].nunique() > 1:\n err = \"Stack move cannot be used when baselines are already heterogeneous\"\n raise RuntimeError(err)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n stacked_lengths = (df[other] - df[\"baseline\"]).dropna().cumsum()\n offsets = stacked_lengths.shift(1).fillna(0)\n\n df[other] = stacked_lengths\n df[\"baseline\"] = df[\"baseline\"] + offsets\n\n return df\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n # TODO where to ensure that other semantic variables are sorted properly?\n # TODO why are we not using the passed in groupby here?\n groupers = [\"col\", \"row\", orient]\n return GroupBy(groupers).apply(data, self._stack, orient)\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 189, "name": "GroupBy", "kind": "ref", "category": "function", "info": " return GroupBy(groupers).apply(data, self._stack, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 193, "name": "Shift", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 221, "name": "Norm", "kind": "def", "category": "class", "info": "_norm\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/moves.py", "rel_fname": "seaborn/_core/moves.py", "line": 249, "name": "_norm", "kind": "def", "category": "function", "info": " def _norm(self, df, var):\n\n if self.where is None:\n denom_data = df[var]\n else:\n denom_data = df.query(self.where)[var]\n df[var] = df[var] / denom_data.agg(self.func)\n\n if self.percent:\n df[var] = df[var] * 100\n\n return df\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return groupby.apply(data, self._norm, other)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 51, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 57, "name": "Layer", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 69, "name": "FacetSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 76, "name": "PairSpec", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 88, "name": "theme_context", "kind": "def", "category": "function", "info": "def theme_context(params: dict[str, Any]) -> Generator:\n \"\"\"Temporarily modify specifc matplotlib rcParams.\"\"\"\n orig_params = {k: mpl.rcParams[k] for k in params}\n color_codes = \"bgrmyck\"\n nice_colors = [*color_palette(\"deep6\"), (.15, .15, .15)]\n orig_colors = [mpl.colors.colorConverter.colors[x] for x in color_codes]\n # TODO how to allow this to reflect the color cycle when relevant?\n try:\n mpl.rcParams.update(params)\n for (code, color) in zip(color_codes, nice_colors):\n mpl.colors.colorConverter.colors[code] = color\n yield\n finally:\n mpl.rcParams.update(orig_params)\n for (code, color) in zip(color_codes, orig_colors):\n mpl.colors.colorConverter.colors[code] = color\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 92, "name": "color_palette", "kind": "ref", "category": "function", "info": " nice_colors = [*color_palette(\"deep6\"), (.15, .15, .15)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": 
"seaborn/_core/plot.py", "line": 106, "name": "build_plot_signature", "kind": "def", "category": "function", "info": "def build_plot_signature(cls):\n \"\"\"\n Decorator function for giving Plot a useful signature.\n\n Currently this mostly saves us some duplicated typing, but we would\n like eventually to have a way of registering new semantic properties,\n at which point dynamic signature generation would become more important.\n\n \"\"\"\n sig = inspect.signature(cls)\n params = [\n inspect.Parameter(\"args\", inspect.Parameter.VAR_POSITIONAL),\n inspect.Parameter(\"data\", inspect.Parameter.KEYWORD_ONLY, default=None)\n ]\n params.extend([\n inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=None)\n for name in PROPERTIES\n ])\n new_sig = sig.replace(parameters=params)\n cls.__signature__ = new_sig\n\n known_properties = textwrap.fill(\n \", \".join([f\"|{p}|\" for p in PROPERTIES]),\n width=78, subsequent_indent=\" \" * 8,\n )\n\n if cls.__doc__ is not None: # support python -OO mode\n cls.__doc__ = cls.__doc__.format(known_properties=known_properties)\n\n return cls\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 141, "name": "ThemeConfig", "kind": "def", "category": "class", "info": "__init__\t_default\treset\tupdate\t_filter_params\t_html_table\t_repr_html_"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 153, "name": "reset", "kind": "ref", "category": "function", "info": " self.reset()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 156, "name": "_default", "kind": "def", "category": "function", "info": " def _default(self) -> dict[str, Any]:\n\n return {\n **self._filter_params(mpl.rcParamsDefault),\n **axes_style(\"darkgrid\"),\n **plotting_context(\"notebook\"),\n \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n }\n\n def reset(self) -> None:\n \"\"\"Update the theme dictionary with seaborn's default values.\"\"\"\n self.update(self._default)\n\n def update(self, other: dict[str, Any] | None = None, /, **kwds):\n \"\"\"Update the theme with a dictionary or keyword arguments of rc parameters.\"\"\"\n if other is not None:\n theme = self._filter_params(other)\n else:\n theme = {}\n theme.update(kwds)\n super().update(theme)\n\n def _filter_params(self, params: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Restruct to thematic rc params.\"\"\"\n return {\n k: v for k, v in params.items()\n if any(k.startswith(p) for p in self.THEME_GROUPS)\n }\n\n def _html_table(self, params: dict[str, Any]) -> list[str]:\n\n lines = [\"\"]\n for k, v in params.items():\n row = f\"\"\n lines.append(row)\n lines.append(\"
{k}:{v!r}
\")\n return lines\n\n def _repr_html_(self) -> str:\n\n repr = [\n \"
\",\n \"
\",\n *self._html_table(self),\n \"
\",\n \"
\",\n ]\n return \"\\n\".join(repr)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 159, "name": "_filter_params", "kind": "ref", "category": "function", "info": " **self._filter_params(mpl.rcParamsDefault),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 160, "name": "axes_style", "kind": "ref", "category": "function", "info": " **axes_style(\"darkgrid\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 161, "name": "plotting_context", "kind": "ref", "category": "function", "info": " **plotting_context(\"notebook\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 162, "name": "color_palette", "kind": "ref", "category": "function", "info": " \"axes.prop_cycle\": cycler(\"color\", color_palette(\"deep\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 165, "name": "reset", "kind": "def", "category": "function", "info": " def reset(self) -> None:\n \"\"\"Update the theme dictionary with seaborn's default values.\"\"\"\n self.update(self._default)\n\n def update(self, other: dict[str, Any] | None = None, /, **kwds):\n \"\"\"Update the theme with a dictionary or keyword arguments of rc parameters.\"\"\"\n if other is not None:\n theme = self._filter_params(other)\n else:\n theme = {}\n theme.update(kwds)\n super().update(theme)\n\n def _filter_params(self, params: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Restruct to thematic rc params.\"\"\"\n return {\n k: v for k, v in params.items()\n if any(k.startswith(p) for p in self.THEME_GROUPS)\n }\n\n def _html_table(self, params: dict[str, Any]) -> list[str]:\n\n lines = [\"\"]\n for k, v in params.items():\n row = f\"\"\n lines.append(row)\n lines.append(\"
{k}:{v!r}
\")\n return lines\n\n def _repr_html_(self) -> str:\n\n repr = [\n \"
\",\n \"
\",\n *self._html_table(self),\n \"
\",\n \"
\",\n ]\n return \"\\n\".join(repr)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 172, "name": "_filter_params", "kind": "ref", "category": "function", "info": " theme = self._filter_params(other)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 178, "name": "_filter_params", "kind": "def", "category": "function", "info": " def _filter_params(self, params: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Restruct to thematic rc params.\"\"\"\n return {\n k: v for k, v in params.items()\n if any(k.startswith(p) for p in self.THEME_GROUPS)\n }\n\n def _html_table(self, params: dict[str, Any]) -> list[str]:\n\n lines = [\"\"]\n for k, v in params.items():\n row = f\"\"\n lines.append(row)\n lines.append(\"
{k}:{v!r}
\")\n return lines\n\n def _repr_html_(self) -> str:\n\n repr = [\n \"
\",\n \"
\",\n *self._html_table(self),\n \"
\",\n \"
\",\n ]\n return \"\\n\".join(repr)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 185, "name": "_html_table", "kind": "def", "category": "function", "info": " def _html_table(self, params: dict[str, Any]) -> list[str]:\n\n lines = [\"\"]\n for k, v in params.items():\n row = f\"\"\n lines.append(row)\n lines.append(\"
{k}:{v!r}
\")\n return lines\n\n def _repr_html_(self) -> str:\n\n repr = [\n \"
\",\n \"
\",\n *self._html_table(self),\n \"
\",\n \"
\",\n ]\n return \"\\n\".join(repr)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 199, "name": "_html_table", "kind": "ref", "category": "function", "info": " *self._html_table(self),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 206, "name": "DisplayConfig", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 213, "name": "PlotConfig", "kind": "def", "category": "class", "info": "__init__\ttheme\tdisplay"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 217, "name": "ThemeConfig", "kind": "ref", "category": "function", "info": " self._theme = ThemeConfig()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 221, "name": "theme", "kind": "def", "category": "function", "info": " def theme(self) -> dict[str, Any]:\n \"\"\"\n Dictionary of base theme parameters for :class:`Plot`.\n\n Keys and values correspond to matplotlib rc params, as documented here:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n \"\"\"\n return self._theme\n\n @property\n def display(self) -> DisplayConfig:\n \"\"\"\n Dictionary of parameters for rich display in Jupyter notebook.\n\n Valid parameters:\n\n - format (\"png\" or \"svg\"): Image format to produce\n - scaling (float): Relative scaling of embedded image\n - hidpi (bool): When True, double the DPI while preserving the size\n\n \"\"\"\n return self._display\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 232, "name": "display", "kind": "def", "category": "function", "info": " def display(self) -> DisplayConfig:\n \"\"\"\n Dictionary of parameters for rich display in Jupyter notebook.\n\n Valid parameters:\n\n - format (\"png\" or \"svg\"): Image format to produce\n - scaling (float): Relative scaling of embedded image\n - hidpi (bool): When True, double the DPI while preserving the size\n\n \"\"\"\n return self._display\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 250, "name": "Plot", "kind": "def", "category": "class", "info": "__init__\t_resolve_positionals\t__add__\t_repr_png_\t_repr_svg_\t_clone\t_theme_with_defaults\t_variables\ton\tadd\tpair\tfacet\tscale\tshare\tlimit\tlabel\tlayout\ttheme\tsave\tshow\tplot\t_plot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 284, "name": "PlotConfig", "kind": "ref", "category": "function", "info": " config = PlotConfig()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 310, "name": "_resolve_positionals", "kind": "ref", "category": "function", "info": " data, variables = self._resolve_positionals(args, data, variables)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 317, "name": "PlotData", "kind": "ref", "category": 
"function", "info": " self._data = PlotData(data, variables)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 336, "name": "_resolve_positionals", "kind": "def", "category": "function", "info": " def _resolve_positionals(\n self,\n args: tuple[DataSource | VariableSpec, ...],\n data: DataSource,\n variables: dict[str, VariableSpec],\n ) -> tuple[DataSource, dict[str, VariableSpec]]:\n \"\"\"Handle positional arguments, which may contain data / x / y.\"\"\"\n if len(args) > 3:\n err = \"Plot() accepts no more than 3 positional arguments (data, x, y).\"\n raise TypeError(err)\n\n # TODO need some clearer way to differentiate data / vector here\n # (There might be an abstract DataFrame class to use here?)\n if isinstance(args[0], (abc.Mapping, pd.DataFrame)):\n if data is not None:\n raise TypeError(\"`data` given by both name and position.\")\n data, args = args[0], args[1:]\n\n if len(args) == 2:\n x, y = args\n elif len(args) == 1:\n x, y = *args, None\n else:\n x = y = None\n\n for name, var in zip(\"yx\", (y, x)):\n if var is not None:\n if name in variables:\n raise TypeError(f\"`{name}` given by both name and position.\")\n # Keep coordinates at the front of the variables dict\n # Cast type because we know this isn't a DataSource at this point\n variables = {name: cast(VariableSpec, var), **variables}\n\n return data, variables\n\n def __add__(self, other):\n\n if isinstance(other, Mark) or isinstance(other, Stat):\n raise TypeError(\"Sorry, this isn't ggplot! Perhaps try Plot.add?\")\n\n other_type = other.__class__.__name__\n raise TypeError(f\"Unsupported operand type(s) for +: 'Plot' and '{other_type}\")\n\n def _repr_png_(self) -> tuple[bytes, dict[str, float]] | None:\n\n if Plot.config.display[\"format\"] != \"png\":\n return None\n return self.plot()._repr_png_()\n\n def _repr_svg_(self) -> str | None:\n\n if Plot.config.display[\"format\"] != \"svg\":\n return None\n return self.plot()._repr_svg_()\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n theme = self.config.theme.copy()\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a 
method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. 
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 379, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]] | None:\n\n if Plot.config.display[\"format\"] != \"png\":\n return None\n return self.plot()._repr_png_()\n\n def _repr_svg_(self) -> str | None:\n\n if Plot.config.display[\"format\"] != \"svg\":\n return None\n return self.plot()._repr_svg_()\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n theme = self.config.theme.copy()\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to 
appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. 
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 383, "name": "_repr_png_", "kind": "ref", "category": "function", "info": " return self.plot()._repr_png_()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 385, "name": "_repr_svg_", "kind": "def", "category": "function", "info": " def _repr_svg_(self) -> str | None:\n\n if Plot.config.display[\"format\"] != \"svg\":\n return None\n return self.plot()._repr_svg_()\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n theme = self.config.theme.copy()\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for 
v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. 
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 424, "name": "_variables", "kind": "def", "category": "function", "info": " def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. 
Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
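The transform unpacking above enforces that `add()` takes at most one Stat, in the first position, followed only by Moves, while `pair()` builds `x0`/`x1`/... keys into a structure spec. A sketch of both contracts, assuming the `Bar`, `Count`, `Dodge`, and `Dot` classes from the seaborn objects API and the penguins dataset (illustrative, not drawn from the tags payload):

import seaborn.objects as so
from seaborn import load_dataset

df = load_dataset("penguins")

# One Stat first, then any number of Moves: accepted.
ok = so.Plot(df, x="island", color="sex").add(so.Bar(), so.Count(), so.Dodge())

# A Stat after a Move trips the `error` flag above and raises TypeError:
# so.Plot(df, x="island").add(so.Bar(), so.Dodge(), so.Count())

# pair() expands a sequence of variables into one subplot per pairing:
grid = (
    so.Plot(df, y="body_mass_g")
    .pair(x=["bill_length_mm", "bill_depth_mm"])
    .add(so.Dot())
)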
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
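Per the branching above, `facet()` accepts a plain list for `order` when only one of `col`/`row` is used (a dict keyed by "col"/"row" is required when both are), and `scale()` accepts the "magic" shorthand arguments listed in its docstring. A hedged sketch under the same penguins assumptions:

import seaborn.objects as so
from seaborn import load_dataset

df = load_dataset("penguins")

(
    so.Plot(df, x="bill_length_mm", y="bill_depth_mm", color="body_mass_g")
    .add(so.Dot())
    .facet(col="species", order=["Gentoo", "Adelie", "Chinstrap"])
    .scale(color="viridis")  # palette name; "log", (min, max), dict, or list also work
    .share(y=True)           # share y limits and ticks across the facets
    .plot()
)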
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
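As the docstrings above note, `limit()` values are `(min, max)` tuples where `None` leaves an end unset, `label()` accepts either literal strings or callables applied to the default label, and `layout()` only records `size`/`engine` when they differ from the `default` sentinel. A short sketch, same assumptions as above:

import seaborn.objects as so
from seaborn import load_dataset

df = load_dataset("penguins")

(
    so.Plot(df, x="bill_length_mm", y="bill_depth_mm")
    .add(so.Dot())
    .limit(x=(30, 60), y=(None, 25))                 # None leaves that end unset
    .label(x=str.capitalize, title="Penguin bills")  # callable transforms the default
    .layout(size=(6, 4), engine="constrained")
    .plot()
)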
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 438, "name": "on", "kind": "def", "category": "function", "info": " def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to 
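The `theme()` body above validates exactly one positional dict (coerced through `mpl.RcParams`), and `save()` passes extra keywords through to `Figure.savefig` while returning the Plot for further chaining. A sketch assuming the `Bars`/`Hist` classes from the objects API; the rc keys and output filename are illustrative:

import seaborn.objects as so
from seaborn import load_dataset

df = load_dataset("penguins")

(
    so.Plot(df, x="flipper_length_mm")
    .add(so.Bars(), so.Hist())
    .theme({"axes.facecolor": "white", "axes.grid": True})  # one positional dict only
    .save("hist.png", bbox_inches="tight")  # extra kwargs pass through to savefig
)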
explicitly call a method that\n triggers compilation, such as :meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. 
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 479, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 559, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 573, "name": "pair", "kind": "def", "category": "function", "info": " def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. 
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 632, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 636, "name": "facet", "kind": "def", "category": "function", "info": " def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 692, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 699, "name": "scale", 
"kind": "def", "category": "function", "info": " def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. `[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. 
The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 722, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 726, "name": "share", "kind": "def", "category": "function", "info": " def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 741, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 745, "name": "limit", 
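For reference, a minimal usage sketch of the `Plot.scale` and `Plot.share` specs captured in the entries above. The `penguins` dataset ships with seaborn but is purely illustrative here; the "magic" scale arguments (palette name, output-range tuple) follow the `scale` docstring:

import seaborn.objects as so
from seaborn import load_dataset

penguins = load_dataset("penguins")  # illustrative example data

p = (
    so.Plot(
        penguins, x="body_mass_g", y="flipper_length_mm",
        color="species", pointsize="body_mass_g",
    )
    .add(so.Dot())
    .scale(color="viridis", pointsize=(2, 8))  # palette name; output-range tuple
    .facet(col="island")
    .share(y=False)  # independent y limits and ticks in each subplot
)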
"kind": "def", "category": "function", "info": " def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 762, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 766, "name": "label", 
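Likewise, a short sketch of the `limit` and `label` specs documented above (variable names are illustrative). Per the docstrings, `None` leaves one side of a limit unset, and a callable label is applied to the default label:

p = (
    so.Plot(penguins, x="bill_length_mm", y="bill_depth_mm", color="sex")
    .add(so.Dot())
    .limit(x=(30, None))                  # lower bound only; upper stays automatic
    .label(
        x="Bill length (mm)",
        y="Bill depth (mm)",
        color=str.capitalize,             # transform the default legend title
        title="Penguin bill dimensions",  # single subplot: sets the axes title
    )
)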
"kind": "def", "category": "function", "info": " def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 788, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 794, "name": "layout", 
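A sketch of `layout` and `theme` matching the signatures above: `size` is in inches, `engine` is one of the documented layout engines, and `theme` takes exactly one positional dict of matplotlib rc parameters (per the `nargs != 1` check in its body):

p = (
    so.Plot(penguins, x="species", y="body_mass_g")
    .add(so.Dot())
    .layout(size=(8, 4), engine="constrained")
    .theme({"axes.facecolor": "white", "axes.grid": True})
)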
"kind": "def", "category": "function", "info": " def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 828, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 839, "name": "theme", "kind": "def", "category": "function", "info": " def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. 
include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 859, "name": "_clone", "kind": "ref", "category": "function", "info": " new = self._clone()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 872, "name": "save", 
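Note the pattern shared by every spec method indexed above: each starts with `new = self._clone()` and returns the copy, so a `Plot` behaves immutably and a base spec can be branched without mutating the original. A small sketch:

base = so.Plot(penguins, x="body_mass_g", y="flipper_length_mm").add(so.Dot())
log_x = base.scale(x="log")         # new object; `base` is untouched
faceted = base.facet(col="island")  # another independent branch
assert log_x is not base and faceted is not base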
"kind": "def", "category": "function", "info": " def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 886, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 886, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 887, "name": "_plot", 
"kind": "ref", "category": "function", "info": " self._plot().save(loc, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 887, "name": "save", "kind": "ref", "category": "function", "info": " self._plot().save(loc, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 914, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 914, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " with theme_context(self._theme_with_defaults()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 915, "name": "_plot", "kind": "ref", "category": "function", "info": " return self._plot(pyplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 917, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 922, "name": "Plotter", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 922, "name": "_theme_with_defaults", "kind": "ref", "category": "function", "info": " plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 925, "name": "_extract_data", "kind": "ref", "category": "function", "info": " common, layers = plotter._extract_data(self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", 
"line": 926, "name": "_setup_figure", "kind": "ref", "category": "function", "info": " plotter._setup_figure(self, common, layers)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 929, "name": "match", "kind": "ref", "category": "function", "info": " coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 930, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, common, layers, coord_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 933, "name": "_compute_stats", "kind": "ref", "category": "function", "info": " plotter._compute_stats(self, layers)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 936, "name": "_setup_scales", "kind": "ref", "category": "function", "info": " plotter._setup_scales(self, common, layers)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 945, "name": "_plot_layer", "kind": "ref", "category": "function", "info": " plotter._plot_layer(self, layer)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 948, "name": "_make_legend", "kind": "ref", "category": "function", "info": " plotter._make_legend(self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 949, "name": "_finalize_figure", "kind": "ref", "category": "function", "info": " plotter._finalize_figure(self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 957, "name": "Plotter", "kind": "def", "category": "class", "info": "__init__\tsave\tshow\t_repr_png_\t_repr_svg_\t_extract_data\t_resolve_label\t_setup_figure\t_compute_stats\t_get_scale\t_get_subplot_data\t_setup_scales\t_plot_layer\t_unscale_coords\t_generate_pairings\t_get_subplot_index\t_filter_subplot_data\t_setup_split_generator\t_update_legend_contents\t_make_legend\t_finalize_figure"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 978, "name": "save", "kind": "def", "category": "function", "info": " def save(self, loc, **kwargs) -> Plotter: # TODO type args\n kwargs.setdefault(\"dpi\", 96)\n try:\n loc = os.path.expanduser(loc)\n except TypeError:\n # loc may be a buffer in which case that would not work\n pass\n self._figure.savefig(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Display the plot by hooking into pyplot.\n\n This method calls :func:`matplotlib.pyplot.show` with any keyword parameters.\n\n \"\"\"\n # TODO if we did not create the Plotter with pyplot, is it possible to do this?\n # If not we should clearly raise.\n import matplotlib.pyplot as plt\n with theme_context(self._theme):\n plt.show(**kwargs)\n\n # TODO API for accessing the underlying matplotlib objects\n # TODO what else is useful in the public API for this class?\n\n def _repr_png_(self) -> tuple[bytes, dict[str, 
float]] | None:\n\n # TODO use matplotlib backend directly instead of going through savefig?\n\n # TODO perhaps have self.show() flip a switch to disable this, so that\n # user does not end up with two versions of the figure in the output\n\n # TODO use bbox_inches=\"tight\" like the inline backend?\n # pro: better results, con: (sometimes) confusing results\n # Better solution would be to default (with option to change)\n # to using constrained/tight layout.\n\n if Plot.config.display[\"format\"] != \"png\":\n return None\n\n buffer = io.BytesIO()\n\n factor = 2 if Plot.config.display[\"hidpi\"] else 1\n scaling = Plot.config.display[\"scaling\"] / factor\n dpi = 96 * factor # TODO put dpi in Plot.config?\n\n with theme_context(self._theme): # TODO _theme_with_defaults?\n self._figure.savefig(buffer, dpi=dpi, format=\"png\", bbox_inches=\"tight\")\n data = buffer.getvalue()\n\n w, h = Image.open(buffer).size\n metadata = {\"width\": w * scaling, \"height\": h * scaling}\n return data, metadata\n\n def _repr_svg_(self) -> str | None:\n\n if Plot.config.display[\"format\"] != \"svg\":\n return None\n\n # TODO DPI for rasterized artists?\n\n scaling = Plot.config.display[\"scaling\"]\n\n buffer = io.StringIO()\n with theme_context(self._theme): # TODO _theme_with_defaults?\n self._figure.savefig(buffer, format=\"svg\", bbox_inches=\"tight\")\n\n root = ElementTree.fromstring(buffer.getvalue())\n w = scaling * float(root.attrib[\"width\"][:-2])\n h = scaling * float(root.attrib[\"height\"][:-2])\n root.attrib.update(width=f\"{w}pt\", height=f\"{h}pt\", viewbox=f\"0 0 {w} {h}\")\n ElementTree.ElementTree(root).write(out := io.BytesIO())\n\n return out.getvalue().decode()\n\n def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? 
Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n 
if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, 
y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
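The subset iteration inside `split_generator` walks the Cartesian product of the ordered grouping levels and tolerates missing combinations via the KeyError fallback. A toy pandas illustration of that pattern:

    import itertools
    import pandas as pd

    df = pd.DataFrame({
        "color": ["r", "r", "b"],
        "style": ["-", ":", "-"],
        "y": [1.0, 2.0, 3.0],
    })
    orders = {"color": ["r", "b"], "style": ["-", ":"]}

    grouped = df.groupby(list(orders), sort=False)
    for key in itertools.product(*orders.values()):
        pd_key = key[0] if len(key) == 1 else key  # singleton tuples trip pandas
        try:
            subset = grouped.get_group(pd_key)
        except KeyError:
            continue  # e.g. ("b", ":") has no rows and is skipped
        print(key, "->", len(subset), "rows")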
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
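The `(title, id)` keying in `_make_legend` is what lets the same variable mapped by several layers collapse into one legend entry with overlaid handles. A simplified two-layer illustration of the merge (strings stand in for matplotlib artists, and the tuple-append branch for three or more layers is omitted):

    contents = [
        (("species", 101), ["h1a", "h1b"], ["a", "b"]),  # entry from layer 1
        (("species", 101), ["h2a", "h2b"], ["a", "b"]),  # same variable id, layer 2
    ]
    merged = {}
    for key, artists, labels in contents:
        if key in merged:
            existing = merged[key][0]
            for i, artist in enumerate(existing):
                # Matplotlib accepts a tuple of handles and overlays them
                existing[i] = (artist, artists[i])
        else:
            merged[key] = (artists.copy(), labels)

    print(merged)  # {('species', 101): ([('h1a', 'h2a'), ('h1b', 'h2b')], ['a', 'b'])}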
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 981, "name": "expanduser", "kind": "ref", "category": "function", "info": " loc = os.path.expanduser(loc)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 998, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1004, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self) -> tuple[bytes, dict[str, float]] | None:\n\n if Plot.config.display[\"format\"] != \"png\":\n return None\n return self.plot()._repr_png_()\n\n def _repr_svg_(self) -> str | None:\n\n if Plot.config.display[\"format\"] != \"svg\":\n return None\n return self.plot()._repr_svg_()\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n theme = self.config.theme.copy()\n theme.update(self._theme)\n return theme\n\n @property\n def _variables(self) -> list[str]:\n\n variables = (\n list(self._data.frame)\n + list(self._pair_spec.get(\"variables\", []))\n + list(self._facet_spec.get(\"variables\", []))\n )\n for layer in self._layers:\n variables.extend(v for v in layer[\"vars\"] if v not in variables)\n\n # Coerce to str in return to appease mypy; we know these will only\n # ever be strings but I don't think we can type a DataFrame that way yet\n return [str(v) for v in variables]\n\n def on(self, target: Axes | SubFigure | Figure) -> Plot:\n \"\"\"\n Provide existing Matplotlib figure or axes for drawing the plot.\n\n When using this method, you will also need to explicitly call a method that\n triggers compilation, such as 
:meth:`Plot.show` or :meth:`Plot.save`. If you\n want to postprocess using matplotlib, you'd need to call :meth:`Plot.plot`\n first to compile the plot without rendering it.\n\n Parameters\n ----------\n target : Axes, SubFigure, or Figure\n Matplotlib object to use. Passing :class:`matplotlib.axes.Axes` will add\n artists without otherwise modifying the figure. Otherwise, subplots will be\n created within the space of the given :class:`matplotlib.figure.Figure` or\n :class:`matplotlib.figure.SubFigure`.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.on.rst\n\n \"\"\"\n accepted_types: tuple # Allow tuple of various length\n if hasattr(mpl.figure, \"SubFigure\"): # Added in mpl 3.4\n accepted_types = (\n mpl.axes.Axes, mpl.figure.SubFigure, mpl.figure.Figure\n )\n accepted_types_str = (\n f\"{mpl.axes.Axes}, {mpl.figure.SubFigure}, or {mpl.figure.Figure}\"\n )\n else:\n accepted_types = mpl.axes.Axes, mpl.figure.Figure\n accepted_types_str = f\"{mpl.axes.Axes} or {mpl.figure.Figure}\"\n\n if not isinstance(target, accepted_types):\n err = (\n f\"The `Plot.on` target must be an instance of {accepted_types_str}. \"\n f\"You passed an instance of {target.__class__} instead.\"\n )\n raise TypeError(err)\n\n new = self._clone()\n new._target = target\n\n return new\n\n def add(\n self,\n mark: Mark,\n *transforms: Stat | Mark,\n orient: str | None = None,\n legend: bool = True,\n data: DataSource = None,\n **variables: VariableSpec,\n ) -> Plot:\n \"\"\"\n Specify a layer of the visualization in terms of mark and data transform(s).\n\n This is the main method for specifying how the data should be visualized.\n It can be called multiple times with different arguments to define\n a plot with multiple layers.\n\n Parameters\n ----------\n mark : :class:`Mark`\n The visual representation of the data to use in this layer.\n transforms : :class:`Stat` or :class:`Move`\n Objects representing transforms to be applied before plotting the data.\n Currently, at most one :class:`Stat` can be used, and it\n must be passed first. This constraint will be relaxed in the future.\n orient : \"x\", \"y\", \"v\", or \"h\"\n The orientation of the mark, which also affects how transforms are computed.\n Typically corresponds to the axis that defines groups for aggregation.\n The \"v\" (vertical) and \"h\" (horizontal) options are synonyms for \"x\" / \"y\",\n but may be more intuitive with some marks. When not provided, an\n orientation will be inferred from characteristics of the data and scales.\n legend : bool\n Option to suppress the mark/mappings for this layer from the legend.\n data : DataFrame or dict\n Data source to override the global source provided in the constructor.\n variables : data vectors or identifiers\n Additional layer-specific variables, including variables that will be\n passed directly to the transforms without scaling.\n\n Examples\n --------\n .. 
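As the docstring above notes, `Plot.on` defers rendering until a compiling method is called explicitly. A sketch targeting two subfigures (subfigure targets require matplotlib>=3.4):

    import matplotlib.pyplot as plt
    import seaborn.objects as so

    fig = plt.figure(figsize=(8, 4))
    left, right = fig.subfigures(1, 2)

    data = {"x": [1, 2, 3], "y": [3, 1, 2]}
    so.Plot(data, x="x", y="y").add(so.Line()).on(left).plot()  # compile only
    so.Plot(data, x="x", y="y").add(so.Dot()).on(right).show()  # compile and render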
include:: ../docstrings/objects.Plot.add.rst\n\n \"\"\"\n if not isinstance(mark, Mark):\n msg = f\"mark must be a Mark instance, not {type(mark)!r}.\"\n raise TypeError(msg)\n\n # TODO This API for transforms was a late decision, and previously Plot.add\n # accepted 0 or 1 Stat instances and 0, 1, or a list of Move instances.\n # It will take some work to refactor the internals so that Stat and Move are\n # treated identically, and until then well need to \"unpack\" the transforms\n # here and enforce limitations on the order / types.\n\n stat: Optional[Stat]\n move: Optional[List[Move]]\n error = False\n if not transforms:\n stat, move = None, None\n elif isinstance(transforms[0], Stat):\n stat = transforms[0]\n move = [m for m in transforms[1:] if isinstance(m, Move)]\n error = len(move) != len(transforms) - 1\n else:\n stat = None\n move = [m for m in transforms if isinstance(m, Move)]\n error = len(move) != len(transforms)\n\n if error:\n msg = \" \".join([\n \"Transforms must have at most one Stat type (in the first position),\",\n \"and all others must be a Move type. Given transform type(s):\",\n \", \".join(str(type(t).__name__) for t in transforms) + \".\"\n ])\n raise TypeError(msg)\n\n new = self._clone()\n new._layers.append({\n \"mark\": mark,\n \"stat\": stat,\n \"move\": move,\n # TODO it doesn't work to supply scalars to variables, but it should\n \"vars\": variables,\n \"source\": data,\n \"legend\": legend,\n \"orient\": {\"v\": \"x\", \"h\": \"y\"}.get(orient, orient), # type: ignore\n })\n\n return new\n\n def pair(\n self,\n x: VariableSpecList = None,\n y: VariableSpecList = None,\n wrap: int | None = None,\n cross: bool = True,\n ) -> Plot:\n \"\"\"\n Produce subplots by pairing multiple `x` and/or `y` variables.\n\n Parameters\n ----------\n x, y : sequence(s) of data vectors or identifiers\n Variables that will define the grid of subplots.\n wrap : int\n When using only `x` or `y`, \"wrap\" subplots across a two-dimensional grid\n with this many columns (when using `x`) or rows (when using `y`).\n cross : bool\n When False, zip the `x` and `y` lists such that the first subplot gets the\n first pair, the second gets the second pair, etc. Otherwise, create a\n two-dimensional grid from the cartesian product of the lists.\n\n Examples\n --------\n .. 
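The unpacking logic above enforces the documented ordering: at most one Stat, in the first position, followed by any number of Moves. For instance:

    import seaborn as sns
    import seaborn.objects as so

    tips = sns.load_dataset("tips")
    (
        so.Plot(tips, x="total_bill", color="time")
        .add(so.Bars(), so.Hist(), so.Stack())  # Stat first, then a Move
        .show()
    )
    # Reversing the order, .add(so.Bars(), so.Stack(), so.Hist()), raises the
    # TypeError constructed above, because Hist is not a Move.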
include:: ../docstrings/objects.Plot.pair.rst\n\n \"\"\"\n # TODO Add transpose= arg, which would then draw pair(y=[...]) across rows\n # This may also be possible by setting `wrap=1`, but is that too unobvious?\n # TODO PairGrid features not currently implemented: diagonals, corner\n\n pair_spec: PairSpec = {}\n\n axes = {\"x\": [] if x is None else x, \"y\": [] if y is None else y}\n for axis, arg in axes.items():\n if isinstance(arg, (str, int)):\n err = f\"You must pass a sequence of variable keys to `{axis}`\"\n raise TypeError(err)\n\n pair_spec[\"variables\"] = {}\n pair_spec[\"structure\"] = {}\n\n for axis in \"xy\":\n keys = []\n for i, col in enumerate(axes[axis]):\n key = f\"{axis}{i}\"\n keys.append(key)\n pair_spec[\"variables\"][key] = col\n\n if keys:\n pair_spec[\"structure\"][axis] = keys\n\n if not cross and len(axes[\"x\"]) != len(axes[\"y\"]):\n err = \"Lengths of the `x` and `y` lists must match with cross=False\"\n raise ValueError(err)\n\n pair_spec[\"cross\"] = cross\n pair_spec[\"wrap\"] = wrap\n\n new = self._clone()\n new._pair_spec.update(pair_spec)\n return new\n\n def facet(\n self,\n col: VariableSpec = None,\n row: VariableSpec = None,\n order: OrderSpec | dict[str, OrderSpec] = None,\n wrap: int | None = None,\n ) -> Plot:\n \"\"\"\n Produce subplots with conditional subsets of the data.\n\n Parameters\n ----------\n col, row : data vectors or identifiers\n Variables used to define subsets along the columns and/or rows of the grid.\n Can be references to the global data source passed in the constructor.\n order : list of strings, or dict with dimensional keys\n Define the order of the faceting variables.\n wrap : int\n When using only `col` or `row`, wrap subplots across a two-dimensional\n grid with this many subplots on the faceting dimension.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.facet.rst\n\n \"\"\"\n variables: dict[str, VariableSpec] = {}\n if col is not None:\n variables[\"col\"] = col\n if row is not None:\n variables[\"row\"] = row\n\n structure = {}\n if isinstance(order, dict):\n for dim in [\"col\", \"row\"]:\n dim_order = order.get(dim)\n if dim_order is not None:\n structure[dim] = list(dim_order)\n elif order is not None:\n if col is not None and row is not None:\n err = \" \".join([\n \"When faceting on both col= and row=, passing `order` as a list\"\n \"is ambiguous. Use a dict with 'col' and/or 'row' keys instead.\"\n ])\n raise RuntimeError(err)\n elif col is not None:\n structure[\"col\"] = list(order)\n elif row is not None:\n structure[\"row\"] = list(order)\n\n spec: FacetSpec = {\n \"variables\": variables,\n \"structure\": structure,\n \"wrap\": wrap,\n }\n\n new = self._clone()\n new._facet_spec.update(spec)\n\n return new\n\n # TODO def twin()?\n\n def scale(self, **scales: Scale) -> Plot:\n \"\"\"\n Specify mappings from data units to visual properties.\n\n Keywords correspond to variables defined in the plot, including coordinate\n variables (`x`, `y`) and semantic variables (`color`, `pointsize`, etc.).\n\n A number of \"magic\" arguments are accepted, including:\n - The name of a transform (e.g., `\"log\"`, `\"sqrt\"`)\n - The name of a palette (e.g., `\"viridis\"`, `\"muted\"`)\n - A tuple of values, defining the output range (e.g. `(1, 5)`)\n - A dict, implying a :class:`Nominal` scale (e.g. `{\"a\": .2, \"b\": .5}`)\n - A list of values, implying a :class:`Nominal` scale (e.g. 
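Putting the two subplot-generating methods together (assumes the bundled `mpg` dataset can be loaded):

    import seaborn as sns
    import seaborn.objects as so

    mpg = sns.load_dataset("mpg")

    # pair(): keys x0, x1, x2 are generated for the three x variables
    (
        so.Plot(mpg, y="mpg")
        .pair(x=["displacement", "weight", "horsepower"], wrap=2)
        .add(so.Dot())
        .show()
    )

    # facet(): a flat `order` list is unambiguous with a single dimension
    (
        so.Plot(mpg, x="horsepower", y="mpg")
        .add(so.Dot())
        .facet(col="origin", order=["usa", "japan", "europe"])
        .show()
    )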
`[\"b\", \"r\"]`)\n\n For more explicit control, pass a scale spec object such as :class:`Continuous`\n or :class:`Nominal`. Or pass `None` to use an \"identity\" scale, which treats\n data values as literally encoding visual properties.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.scale.rst\n\n \"\"\"\n new = self._clone()\n new._scales.update(scales)\n return new\n\n def share(self, **shares: bool | str) -> Plot:\n \"\"\"\n Control sharing of axis limits and ticks across subplots.\n\n Keywords correspond to variables defined in the plot, and values can be\n boolean (to share across all subplots), or one of \"row\" or \"col\" (to share\n more selectively across one dimension of a grid).\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.share.rst\n\n \"\"\"\n new = self._clone()\n new._shares.update(shares)\n return new\n\n def limit(self, **limits: tuple[Any, Any]) -> Plot:\n \"\"\"\n Control the range of visible data.\n\n Keywords correspond to variables defined in the plot, and values are a\n `(min, max)` tuple (where either can be `None` to leave unset).\n\n Limits apply only to the axis; data outside the visible range are\n still used for any stat transforms and added to the plot.\n\n Behavior for non-coordinate variables is currently undefined.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.limit.rst\n\n \"\"\"\n new = self._clone()\n new._limits.update(limits)\n return new\n\n def label(self, *, title=None, **variables: str | Callable[[str], str]) -> Plot:\n \"\"\"\n Control the labels and titles for axes, legends, and subplots.\n\n Additional keywords correspond to variables defined in the plot.\n Values can be one of the following types:\n\n - string (used literally; pass \"\" to clear the default label)\n - function (called on the default label)\n\n For coordinate variables, the value sets the axis label.\n For semantic variables, the value sets the legend title.\n For faceting variables, `title=` modifies the subplot-specific label,\n while `col=` and/or `row=` add a label for the faceting variable.\n When using a single subplot, `title=` sets its title.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.label.rst\n\n\n \"\"\"\n new = self._clone()\n if title is not None:\n new._labels[\"title\"] = title\n new._labels.update(variables)\n return new\n\n def layout(\n self,\n *,\n size: tuple[float, float] | Default = default,\n engine: str | None | Default = default,\n ) -> Plot:\n \"\"\"\n Control the figure size and layout.\n\n .. note::\n\n Default figure sizes and the API for specifying the figure size are subject\n to change in future \"experimental\" releases of the objects API. The default\n layout engine may also change.\n\n Parameters\n ----------\n size : (width, height)\n Size of the resulting figure, in inches. Size is inclusive of legend when\n using pyplot, but not otherwise.\n engine : {{\"tight\", \"constrained\", None}}\n Name of method for automatically adjusting the layout to remove overlap.\n The default depends on whether :meth:`Plot.on` is used.\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.layout.rst\n\n \"\"\"\n # TODO add an \"auto\" mode for figsize that roughly scales with the rcParams\n # figsize (so that works), but expands to prevent subplots from being squished\n # Also should we have height=, aspect=, exclusive with figsize? 
Or working\n # with figsize when only one is defined?\n\n new = self._clone()\n\n if size is not default:\n new._figure_spec[\"figsize\"] = size\n if engine is not default:\n new._layout_spec[\"engine\"] = engine\n\n return new\n\n # TODO def legend (ugh)\n\n def theme(self, *args: dict[str, Any]) -> Plot:\n \"\"\"\n Control the appearance of elements in the plot.\n\n .. note::\n\n The API for customizing plot appearance is not yet finalized.\n Currently, the only valid argument is a dict of matplotlib rc parameters.\n (This dict must be passed as a positional argument.)\n\n It is likely that this method will be enhanced in future releases.\n\n Matplotlib rc parameters are documented on the following page:\n https://matplotlib.org/stable/tutorials/introductory/customizing.html\n\n Examples\n --------\n .. include:: ../docstrings/objects.Plot.theme.rst\n\n \"\"\"\n new = self._clone()\n\n # We can skip this whole block on Python 3.8+ with positional-only syntax\n nargs = len(args)\n if nargs != 1:\n err = f\"theme() takes 1 positional argument, but {nargs} were given\"\n raise TypeError(err)\n\n rc = mpl.RcParams(args[0])\n new._theme.update(rc)\n\n return new\n\n def save(self, loc, **kwargs) -> Plot:\n \"\"\"\n Compile the plot and write it to a buffer or file on disk.\n\n Parameters\n ----------\n loc : str, path, or buffer\n Location on disk to save the figure, or a buffer to write into.\n kwargs\n Other keyword arguments are passed through to\n :meth:`matplotlib.figure.Figure.savefig`.\n\n \"\"\"\n # TODO expose important keyword arguments in our signature?\n with theme_context(self._theme_with_defaults()):\n self._plot().save(loc, **kwargs)\n return self\n\n def show(self, **kwargs) -> None:\n \"\"\"\n Compile the plot and display it by hooking into pyplot.\n\n Calling this method is not necessary to render a plot in notebook context,\n but it may be in other environments (e.g., in a terminal). After compiling the\n plot, it calls :func:`matplotlib.pyplot.show` (passing any keyword parameters).\n\n Unlike other :class:`Plot` methods, there is no return value. 
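A combined sketch of the figure-level controls defined above; note that the rc dict must be the single positional argument to `theme`, and that extra `save` keywords pass through to `Figure.savefig` (the output filename here is illustrative):

    import seaborn.objects as so
    from seaborn import axes_style

    (
        so.Plot({"x": [1, 2, 3], "y": [1, 4, 9]}, x="x", y="y")
        .add(so.Line())
        .layout(size=(6, 3), engine="constrained")
        .theme({**axes_style("whitegrid"), "axes.titlesize": 12})
        .save("quadratic.png", dpi=150)
    )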
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1025, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme): # TODO _theme_with_defaults?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1027, "name": "getvalue", "kind": "ref", "category": "function", "info": " data = buffer.getvalue()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1033, "name": "_repr_svg_", "kind": "def", "category": "function", "info": " def _repr_svg_(self) -> str | None:\n\n if Plot.config.display[\"format\"] != \"svg\":\n return None\n return self.plot()._repr_svg_()\n\n def _clone(self) -> Plot:\n \"\"\"Generate a new object with the same information as the current spec.\"\"\"\n new = Plot()\n\n # TODO any way to enforce that data does not get mutated?\n new._data = self._data\n\n new._layers.extend(self._layers)\n\n new._scales.update(self._scales)\n new._shares.update(self._shares)\n new._limits.update(self._limits)\n new._labels.update(self._labels)\n new._theme.update(self._theme)\n\n new._facet_spec.update(self._facet_spec)\n new._pair_spec.update(self._pair_spec)\n\n new._figure_spec.update(self._figure_spec)\n new._subplot_spec.update(self._subplot_spec)\n new._layout_spec.update(self._layout_spec)\n\n new._target = self._target\n\n return new\n\n def _theme_with_defaults(self) -> dict[str, Any]:\n\n theme = self.config.theme.copy()\n 
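`_plot` fixes the compilation order that the public methods rely on. A short sketch of driving it through the public API (the filename is illustrative):

    import seaborn.objects as so

    p = so.Plot({"x": [1, 2, 3], "y": [2, 1, 3]}, x="x", y="y").add(so.Dot())
    plotter = p.plot()   # extract data -> figure -> coordinate scales -> stats
                         # -> remaining scales -> layers -> legend -> finalize
    plotter.save("dots.png")  # the compiled Plotter can still be saved or shown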
This should be\n the last method you call when specifying a plot.\n\n \"\"\"\n # TODO make pyplot configurable at the class level, and when not using,\n # import IPython.display and call on self to populate cell output?\n\n # Keep an eye on whether matplotlib implements \"attaching\" an existing\n # figure to pyplot: https://github.com/matplotlib/matplotlib/pull/14024\n\n self.plot(pyplot=True).show(**kwargs)\n\n def plot(self, pyplot: bool = False) -> Plotter:\n \"\"\"\n Compile the plot spec and return the Plotter object.\n \"\"\"\n with theme_context(self._theme_with_defaults()):\n return self._plot(pyplot)\n\n def _plot(self, pyplot: bool = False) -> Plotter:\n\n # TODO if we have _target object, pyplot should be determined by whether it\n # is hooked into the pyplot state machine (how do we check?)\n\n plotter = Plotter(pyplot=pyplot, theme=self._theme_with_defaults())\n\n # Process the variable assignments and initialize the figure\n common, layers = plotter._extract_data(self)\n plotter._setup_figure(self, common, layers)\n\n # Process the scale spec for coordinate variables and transform their data\n coord_vars = [v for v in self._variables if re.match(r\"^x|y\", v)]\n plotter._setup_scales(self, common, layers, coord_vars)\n\n # Apply statistical transform(s)\n plotter._compute_stats(self, layers)\n\n # Process scale spec for semantic variables and coordinates computed by stat\n plotter._setup_scales(self, common, layers)\n\n # TODO Remove these after updating other methods\n # ---- Maybe have debug= param that attaches these when True?\n plotter._data = common\n plotter._layers = layers\n\n # Process the data for each layer and add matplotlib artists\n for layer in layers:\n plotter._plot_layer(self, layer)\n\n # Add various figure decorations\n plotter._make_legend(self)\n plotter._finalize_figure(self)\n\n return plotter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1043, "name": "theme_context", "kind": "ref", "category": "function", "info": " with theme_context(self._theme): # TODO _theme_with_defaults?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1046, "name": "getvalue", "kind": "ref", "category": "function", "info": " root = ElementTree.fromstring(buffer.getvalue())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1052, "name": "getvalue", "kind": "ref", "category": "function", "info": " return out.getvalue().decode()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1052, "name": "decode", "kind": "ref", "category": "function", "info": " return out.getvalue().decode()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1054, "name": "_extract_data", "kind": "def", "category": "function", "info": " def _extract_data(self, p: Plot) -> tuple[PlotData, list[Layer]]:\n\n common_data = (\n p._data\n .join(None, p._facet_spec.get(\"variables\"))\n .join(None, p._pair_spec.get(\"variables\"))\n )\n\n layers: list[Layer] = []\n for layer in p._layers:\n spec = layer.copy()\n spec[\"data\"] = common_data.join(layer.get(\"source\"), layer.get(\"vars\"))\n layers.append(spec)\n\n return common_data, layers\n\n def 
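`_extract_data` joins the figure-level source with each layer's `source` and `vars`, which is what makes per-layer data overrides work. A sketch with a hypothetical annotation frame:

    import pandas as pd
    import seaborn.objects as so

    prices = pd.DataFrame({
        "date": pd.date_range("2024-01-01", periods=5),
        "price": [1.0, 2.0, 3.0, 2.0, 4.0],
    })
    events = pd.DataFrame({
        "date": pd.to_datetime(["2024-01-03"]),
        "price": [3.0],
    })

    (
        so.Plot(prices, x="date", y="price")
        .add(so.Line())
        .add(so.Dot(), data=events)  # layer source joins over the global one
        .show()
    )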
_resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) 
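`_resolve_label` is also what gives `Plot.label` its two value types: literal strings, and callables applied to the default label. For example:

    import seaborn as sns
    import seaborn.objects as so

    tips = sns.load_dataset("tips")
    (
        so.Plot(tips, x="total_bill", y="tip", color="sex")
        .add(so.Dot())
        .label(
            x="Total bill ($)",    # literal replacement
            color=str.capitalize,  # callable applied to the default label
            title="Tips by bill size",
        )
        .show()
    )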
so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, 
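The grouping assembled in `_compute_stats` (the orient variable plus the semantic and faceting variables) is what each Stat receives; for example, an aggregation here splits by `x` and `color`:

    import seaborn as sns
    import seaborn.objects as so

    tips = sns.load_dataset("tips")
    (
        so.Plot(tips, x="day", y="total_bill", color="smoker")
        .add(so.Dot(), so.Agg("median"))  # median per (day, smoker) group
        .show()
    )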
\"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n                # (sorted unique numbers) will correctly reconstruct the intended order.\n                # TODO This is tricky, make sure we add some tests for this\n                if var not in \"xy\" and var in scales:\n                    return getattr(scales[var], \"order\", None)\n\n            if orient in df:\n                width = pd.Series(index=df.index, dtype=float)\n                for view in subplots:\n                    view_idx = self._get_subplot_data(\n                        df, orient, view, p._shares.get(orient)\n                    ).index\n                    view_df = df.loc[view_idx]\n                    if \"width\" in mark._mappable_props:\n                        view_width = mark._resolve(view_df, \"width\", None)\n                    elif \"width\" in df:\n                        view_width = view_df[\"width\"]\n                    else:\n                        view_width = 0.8  # TODO what default?\n                    spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n                    width.loc[view_idx] = view_width * spacing\n                df[\"width\"] = width\n\n            if \"baseline\" in mark._mappable_props:\n                # TODO what marks should have this?\n                # If we can set baseline with, e.g., Bar(), then the\n                # \"other\" (e.g. y for x oriented bars) parameterization\n                # is somewhat ambiguous.\n                baseline = mark._resolve(df, \"baseline\", None)\n            else:\n                # TODO unlike width, we might not want to add baseline to data\n                # if the mark doesn't use it. Practically, there is a concern about\n                # Mark abstraction like Area / Ribbon\n                baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n            df[\"baseline\"] = baseline\n\n            if move is not None:\n                moves = move if isinstance(move, list) else [move]\n                for move_step in moves:\n                    move_by = getattr(move_step, \"by\", None)\n                    if move_by is None:\n                        move_by = grouping_properties\n                    move_groupers = [*move_by, *default_grouping_vars]\n                    if move_step.group_by_orient:\n                        move_groupers.insert(0, orient)\n                    order = {var: get_order(var) for var in move_groupers}\n                    groupby = GroupBy(order)\n                    df = move_step(df, groupby, orient, scales)\n\n            df = self._unscale_coords(subplots, df, orient)\n\n            grouping_vars = mark._grouping_props + default_grouping_vars\n            split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n            mark._plot(split_generator, scales, orient)\n\n        # TODO is this the right place for this?\n        for view in self._subplots:\n            view[\"ax\"].autoscale_view()\n\n        if layer[\"legend\"]:\n            self._update_legend_contents(p, mark, data, scales)\n\n    def _unscale_coords(\n        self, subplots: list[dict], df: DataFrame, orient: str,\n    ) -> DataFrame:\n        # TODO do we still have numbers in the variable name at this point?\n        coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n        out_df = (\n            df\n            .drop(coord_cols, axis=1)\n            .reindex(df.columns, axis=1)  # So unscaled columns retain their place\n            .copy(deep=False)\n        )\n\n        for view in subplots:\n            view_df = self._filter_subplot_data(df, view)\n            axes_df = view_df[coord_cols]\n            for var, values in axes_df.items():\n\n                axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n                # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n                transform = axis.get_transform().inverted().transform\n                inverted = transform(values)\n                out_df.loc[values.index, str(var)] = inverted\n\n        return out_df\n\n    def _generate_pairings(\n        self, data: PlotData, pair_variables: dict,\n    ) -> Generator[\n        tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n    ]:\n        # TODO retype return with subplot_spec or similar\n\n        iter_axes = itertools.product(*[\n            pair_variables.get(axis, [axis]) for axis in \"xy\"\n        ])\n\n        for x, y in iter_axes:\n\n            subplots = []\n            for view in self._subplots:\n                if (view[\"x\"] == x) and (view[\"y\"] == y):\n                    subplots.append(view)\n\n            if data.frame.empty and data.frames:\n                out_df = data.frames[(x, 
y)].copy()\n            elif not pair_variables:\n                out_df = data.frame.copy()\n            else:\n                if data.frame.empty and data.frames:\n                    out_df = data.frames[(x, y)].copy()\n                else:\n                    out_df = data.frame.copy()\n\n            scales = self._scales.copy()\n            if x in out_df:\n                scales[\"x\"] = self._scales[x]\n            if y in out_df:\n                scales[\"y\"] = self._scales[y]\n\n            for axis, var in zip(\"xy\", (x, y)):\n                if axis != var:\n                    out_df = out_df.rename(columns={var: axis})\n                    cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n                    out_df = out_df.drop(cols, axis=1)\n\n            yield subplots, out_df, scales\n\n    def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n        dims = df.columns.intersection([\"col\", \"row\"])\n        if dims.empty:\n            return df.index\n\n        keep_rows = pd.Series(True, df.index, dtype=bool)\n        for dim in dims:\n            keep_rows &= df[dim] == subplot[dim]\n        return df.index[keep_rows]\n\n    def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n        # TODO note redundancies with preceding function ... needs refactoring\n        dims = df.columns.intersection([\"col\", \"row\"])\n        if dims.empty:\n            return df\n\n        keep_rows = pd.Series(True, df.index, dtype=bool)\n        for dim in dims:\n            keep_rows &= df[dim] == subplot[dim]\n        return df[keep_rows]\n\n    def _setup_split_generator(\n        self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n    ) -> Callable[[], Generator]:\n\n        grouping_keys = []\n        grouping_vars = [\n            v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n        ]\n        for var in grouping_vars:\n            order = getattr(self._scales[var], \"order\", None)\n            if order is None:\n                order = categorical_order(df[var])\n            grouping_keys.append(order)\n\n        def split_generator(keep_na=False) -> Generator:\n\n            for view in subplots:\n\n                axes_df = self._filter_subplot_data(df, view)\n\n                with pd.option_context(\"mode.use_inf_as_na\", True):\n                    if keep_na:\n                        # The simpler thing to do would be x.dropna().reindex(x.index).\n                        # But that doesn't work with the way that the subset iteration\n                        # is written below, which assumes data for grouping vars.\n                        # Matplotlib (usually?) masks nan data, so this should \"work\".\n                        # Downstream code can also drop these rows, at some speed cost.\n                        present = axes_df.notna().all(axis=1)\n                        nulled = {}\n                        for axis in \"xy\":\n                            if axis in axes_df:\n                                nulled[axis] = axes_df[axis].where(present)\n                        axes_df = axes_df.assign(**nulled)\n                    else:\n                        axes_df = axes_df.dropna()\n\n                subplot_keys = {}\n                for dim in [\"col\", \"row\"]:\n                    if view[dim] is not None:\n                        subplot_keys[dim] = view[dim]\n\n                if not grouping_vars or not any(grouping_keys):\n                    if not axes_df.empty:\n                        yield subplot_keys, axes_df.copy(), view[\"ax\"]\n                    continue\n\n                grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n                for key in itertools.product(*grouping_keys):\n\n                    # Pandas fails with singleton tuple inputs\n                    pd_key = key[0] if len(key) == 1 else key\n\n                    try:\n                        df_subset = grouped_df.get_group(pd_key)\n                    except KeyError:\n                        # TODO (from initial work on categorical plots refactor)\n                        # We are adding this to allow backwards compatibility\n                        # with the empty artists that old categorical plots would\n                        # add (before 0.12), which we may decide to break, in which\n                        # case this option could be removed\n                        df_subset = axes_df.loc[[]]\n\n                    if df_subset.empty:\n                        continue\n\n                    sub_vars = dict(zip(grouping_vars, key))\n                    sub_vars.update(subplot_keys)\n\n                    # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1070, "name": "_resolve_label", "kind": "def", "category": "function", "info": " def _resolve_label(self, p: Plot, var: str, auto_label: str | None) -> str:\n\n label: str\n if var in p._labels:\n manual_label = p._labels[var]\n if callable(manual_label) and auto_label is not None:\n label = manual_label(auto_label)\n else:\n label = cast(str, manual_label)\n elif auto_label is None:\n label = \"\"\n else:\n label = auto_label\n return label\n\n def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) 
so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, 
\"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, 
y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1076, "name": "manual_label", "kind": "ref", "category": "function", "info": " label = manual_label(auto_label)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1085, "name": "_setup_figure", "kind": "def", "category": "function", "info": " def _setup_figure(self, p: Plot, common: PlotData, layers: list[Layer]) -> None:\n\n # --- Parsing the faceting/pairing parameterization to specify figure grid\n\n subplot_spec = p._subplot_spec.copy()\n facet_spec = p._facet_spec.copy()\n pair_spec = p._pair_spec.copy()\n\n for axis in \"xy\":\n if axis in p._shares:\n subplot_spec[f\"share{axis}\"] = p._shares[axis]\n\n for dim in [\"col\", \"row\"]:\n if dim in common.frame and dim not in facet_spec[\"structure\"]:\n order = categorical_order(common.frame[dim])\n facet_spec[\"structure\"][dim] = order\n\n self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n\n # --- Figure initialization\n self._figure = subplots.init_figure(\n pair_spec, self._pyplot, p._figure_spec, p._target,\n )\n\n # --- Figure annotation\n for sub in subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n\n # ~~ Axis labels\n\n # TODO Should we make it possible to use only one x/y label for\n # all rows/columns in a faceted plot? Maybe using sub{axis}label,\n # although the alignments of the labels from that method leaves\n # something to be desired (in terms of how it defines 'centered').\n names = [\n common.names.get(axis_key),\n *(layer[\"data\"].names.get(axis_key) for layer in layers)\n ]\n auto_label = next((name for name in names if name is not None), None)\n label = self._resolve_label(p, axis_key, auto_label)\n ax.set(**{f\"{axis}label\": label})\n\n # ~~ Decoration visibility\n\n # TODO there should be some override (in Plot.layout?) 
so that\n # axis / tick labels can be shown on interior shared axes if desired\n\n axis_obj = getattr(ax, f\"{axis}axis\")\n visible_side = {\"x\": \"bottom\", \"y\": \"left\"}.get(axis)\n show_axis_label = (\n sub[visible_side]\n or not p._pair_spec.get(\"cross\", True)\n or (\n axis in p._pair_spec.get(\"structure\", {})\n and bool(p._pair_spec.get(\"wrap\"))\n )\n )\n axis_obj.get_label().set_visible(show_axis_label)\n\n show_tick_labels = (\n show_axis_label\n or subplot_spec.get(f\"share{axis}\") not in (\n True, \"all\", {\"x\": \"col\", \"y\": \"row\"}[axis]\n )\n )\n for group in (\"major\", \"minor\"):\n for t in getattr(axis_obj, f\"get_{group}ticklabels\")():\n t.set_visible(show_tick_labels)\n\n # TODO we want right-side titles for row facets in most cases?\n # Let's have what we currently call \"margin titles\" but properly using the\n # ax.set_title interface (see my gist)\n title_parts = []\n for dim in [\"col\", \"row\"]:\n if sub[dim] is not None:\n val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n if dim in p._labels:\n key = self._resolve_label(p, dim, common.names.get(dim))\n val = f\"{key} {val}\"\n title_parts.append(val)\n\n has_col = sub[\"col\"] is not None\n has_row = sub[\"row\"] is not None\n show_title = (\n has_col and has_row\n or (has_col or has_row) and p._facet_spec.get(\"wrap\")\n or (has_col and sub[\"top\"])\n # TODO or has_row and sub[\"right\"] and \n or has_row # TODO and not \n )\n if title_parts:\n title = \" | \".join(title_parts)\n title_text = ax.set_title(title)\n title_text.set_visible(show_title)\n elif not (has_col or has_row):\n title = self._resolve_label(p, \"title\", None)\n title_text = ax.set_title(title)\n\n def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, 
\"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, 
y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1099, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(common.frame[dim])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1102, "name": "Subplots", "kind": "ref", "category": "function", "info": " self._subplots = subplots = Subplots(subplot_spec, facet_spec, pair_spec)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1105, "name": "init_figure", "kind": "ref", "category": "function", "info": " self._figure = subplots.init_figure(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1126, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " label = self._resolve_label(p, axis_key, auto_label)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1162, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " val = self._resolve_label(p, \"title\", f\"{sub[dim]}\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1164, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " key = self._resolve_label(p, dim, common.names.get(dim))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1182, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " title = self._resolve_label(p, \"title\", None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1185, "name": "_compute_stats", "kind": "def", "category": "function", "info": " def _compute_stats(self, spec: Plot, layers: list[Layer]) -> None:\n\n grouping_vars = [v for v in PROPERTIES if v not in \"xy\"]\n grouping_vars += [\"col\", \"row\", \"group\"]\n\n pair_vars = spec._pair_spec.get(\"structure\", {})\n\n for layer in layers:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n stat = layer[\"stat\"]\n\n if stat is None:\n continue\n\n iter_axes = itertools.product(*[\n pair_vars.get(axis, [axis]) 
for axis in \"xy\"\n ])\n\n old = data.frame\n\n if pair_vars:\n data.frames = {}\n data.frame = data.frame.iloc[:0] # TODO to simplify typing\n\n for coord_vars in iter_axes:\n\n pairings = \"xy\", coord_vars\n\n df = old.copy()\n scales = self._scales.copy()\n\n for axis, var in zip(*pairings):\n if axis != var:\n df = df.rename(columns={var: axis})\n drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n df = df.drop(drop_cols, axis=1)\n scales[axis] = scales[var]\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n if stat.group_by_orient:\n grouper = [orient, *grouping_vars]\n else:\n grouper = grouping_vars\n groupby = GroupBy(grouper)\n res = stat(df, groupby, orient, scales)\n\n if pair_vars:\n data.frames[coord_vars] = res\n else:\n data.frame = res\n\n def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the 
variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, 
y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1221, "name": "match", "kind": "ref", "category": "function", "info": " drop_cols = [x for x in df if re.match(rf\"{axis}\\d+\", str(x))]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1225, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1231, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(grouper)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1239, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(\n self, spec: Plot, var: str, prop: Property, values: Series\n ) -> Scale:\n\n if var in spec._scales:\n arg = spec._scales[var]\n if arg is None or isinstance(arg, Scale):\n scale = arg\n else:\n scale = prop.infer_scale(arg, values)\n else:\n scale = prop.default_scale(values)\n\n return scale\n\n def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n 
variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. 
This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. 
Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, 
y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1248, "name": "infer_scale", "kind": "ref", "category": "function", "info": " scale = prop.infer_scale(arg, values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1250, "name": "default_scale", "kind": "ref", "category": "function", "info": " scale = prop.default_scale(values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1254, "name": "_get_subplot_data", "kind": "def", "category": "function", "info": " def _get_subplot_data(self, df, var, view, share_state):\n\n if share_state in [True, \"all\"]:\n # The all-shared case is easiest, every subplot sees all the data\n seed_values = df[var]\n else:\n # Otherwise, we need to setup separate scales for different subplots\n if share_state in [False, \"none\"]:\n # Fully independent axes are also easy: use each subplot's data\n idx = self._get_subplot_index(df, view)\n elif share_state in df:\n # Sharing within row/col is more complicated\n use_rows = df[share_state] == view[share_state]\n idx = df.index[use_rows]\n else:\n # This configuration doesn't make much sense, but it's fine\n idx = df.index\n\n seed_values = df.loc[idx, var]\n\n return seed_values\n\n def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n if m is None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and 
faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = 
layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def 
_generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1263, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": " idx = self._get_subplot_index(df, view)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1276, "name": "_setup_scales", "kind": "def", "category": "function", "info": " def _setup_scales(\n self, p: Plot,\n common: PlotData,\n layers: list[Layer],\n variables: list[str] | None = None,\n ) -> None:\n\n if variables is None:\n # Add variables that have data but not a scale, which happens\n # because this method can be called multiple time, to handle\n # variables added during the Stat transform.\n variables = []\n for layer in layers:\n variables.extend(layer[\"data\"].frame.columns)\n for df in layer[\"data\"].frames.values():\n variables.extend(str(v) for v in df if v not in variables)\n variables = [v for v in variables if v not in self._scales]\n\n for var in variables:\n\n # Determine whether this is a coordinate variable\n # (i.e., x/y, paired x/y, or derivative such as xmax)\n m = re.match(r\"^(?P<coord>(?P<axis>x|y)\\d*).*\", var)\n if m is 
None:\n coord = axis = None\n else:\n coord = m[\"coord\"]\n axis = m[\"axis\"]\n\n # Get keys that handle things like x0, xmax, properly where relevant\n prop_key = var if axis is None else axis\n scale_key = var if coord is None else coord\n\n if prop_key not in PROPERTIES:\n continue\n\n # Concatenate layers, using only the relevant coordinate and faceting vars,\n # This is unnecessarily wasteful, as layer data will often be redundant.\n # But figuring out the minimal amount we need is more complicated.\n cols = [var, \"col\", \"row\"]\n parts = [common.frame.filter(cols)]\n for layer in layers:\n parts.append(layer[\"data\"].frame.filter(cols))\n for df in layer[\"data\"].frames.values():\n parts.append(df.filter(cols))\n var_df = pd.concat(parts, ignore_index=True)\n\n prop = PROPERTIES[prop_key]\n scale = self._get_scale(p, scale_key, prop, var_df[var])\n\n if scale_key not in p._variables:\n # TODO this implies that the variable was added by the stat\n # It allows downstream orientation inference to work properly.\n # But it feels rather hacky, so ideally revisit.\n scale._priority = 0 # type: ignore\n\n if axis is None:\n # We could think about having a broader concept of (un)shared properties\n # In general, not something you want to do (different scales in facets)\n # But could make sense e.g. with paired plots. Build later.\n share_state = None\n subplots = []\n else:\n share_state = self._subplots.subplot_spec[f\"share{axis}\"]\n subplots = [view for view in self._subplots if view[axis] == coord]\n\n # Shared categorical axes are broken on matplotlib<3.4.0.\n # https://github.com/matplotlib/matplotlib/pull/18308\n # This only affects us when sharing *paired* axes. This is a novel/niche\n # behavior, so we will raise rather than hack together a workaround.\n if axis is not None and _version_predates(mpl, \"3.4\"):\n paired_axis = axis in p._pair_spec.get(\"structure\", {})\n cat_scale = isinstance(scale, Nominal)\n ok_dim = {\"x\": \"col\", \"y\": \"row\"}[axis]\n shared_axes = share_state not in [False, \"none\", ok_dim]\n if paired_axis and cat_scale and shared_axes:\n err = \"Sharing paired categorical axes requires matplotlib>=3.4.0\"\n raise RuntimeError(err)\n\n if scale is None:\n self._scales[var] = Scale._identity()\n else:\n try:\n self._scales[var] = scale._setup(var_df[var], prop)\n except Exception as err:\n raise PlotSpecError._during(\"Scale setup\", var) from err\n\n if axis is None or (var != coord and coord in p._variables):\n # Everything below here applies only to coordinate variables\n continue\n\n # Set up an empty series to receive the transformed values.\n # We need this to handle piecemeal transforms of categories -> floats.\n transformed_data = []\n for layer in layers:\n index = layer[\"data\"].frame.index\n empty_series = pd.Series(dtype=float, index=index, name=var)\n transformed_data.append(empty_series)\n\n for view in subplots:\n\n axis_obj = getattr(view[\"ax\"], f\"{axis}axis\")\n seed_values = self._get_subplot_data(var_df, var, view, share_state)\n view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var not in layer_df:\n continue\n\n idx = self._get_subplot_index(layer_df, view)\n try:\n new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n except Exception as err:\n spec_error = PlotSpecError._during(\"Scaling operation\", var)\n raise spec_error from err\n\n # Now 
the transformed data series are complete, update the layer data\n for layer, new_series in zip(layers, transformed_data):\n layer_df = layer[\"data\"].frame\n if var in layer_df:\n layer_df[var] = pd.to_numeric(new_series)\n\n def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1298, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^(?P(?Px|y)\\d*).*\", var)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1324, "name": "_get_scale", "kind": "ref", "category": "function", "info": " scale = self._get_scale(p, scale_key, prop, var_df[var])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1346, "name": "_version_predates", "kind": "ref", "category": "function", "info": " if axis is not None and _version_predates(mpl, \"3.4\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1356, "name": "_identity", "kind": "ref", "category": "function", "info": " self._scales[var] = Scale._identity()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1359, "name": "_setup", "kind": "ref", "category": "function", "info": " self._scales[var] = scale._setup(var_df[var], prop)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1361, "name": "_during", "kind": "ref", "category": "function", "info": " raise PlotSpecError._during(\"Scale setup\", var) from err\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1378, "name": "_get_subplot_data", "kind": "ref", "category": "function", "info": " seed_values = self._get_subplot_data(var_df, var, view, share_state)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1379, "name": "_setup", "kind": "ref", "category": "function", "info": " view_scale = scale._setup(seed_values, prop, axis=axis_obj)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1380, "name": "set_scale_obj", "kind": "ref", "category": "function", "info": " set_scale_obj(view[\"ax\"], axis, view_scale._matplotlib_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": 
"seaborn/_core/plot.py", "line": 1387, "name": "_get_subplot_index", "kind": "ref", "category": "function", "info": " idx = self._get_subplot_index(layer_df, view)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1389, "name": "view_scale", "kind": "ref", "category": "function", "info": " new_series.loc[idx] = view_scale(layer_df.loc[idx, var])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1391, "name": "_during", "kind": "ref", "category": "function", "info": " spec_error = PlotSpecError._during(\"Scaling operation\", var)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1400, "name": "_plot_layer", "kind": "def", "category": "function", "info": " def _plot_layer(self, p: Plot, layer: Layer) -> None:\n\n data = layer[\"data\"]\n mark = layer[\"mark\"]\n move = layer[\"move\"]\n\n default_grouping_vars = [\"col\", \"row\", \"group\"] # TODO where best to define?\n grouping_properties = [v for v in PROPERTIES if v[0] not in \"xy\"]\n\n pair_variables = p._pair_spec.get(\"structure\", {})\n\n for subplots, df, scales in self._generate_pairings(data, pair_variables):\n\n orient = layer[\"orient\"] or mark._infer_orient(scales)\n\n def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1411, "name": "_generate_pairings", "kind": "ref", "category": "function", "info": " for subplots, df, scales in self._generate_pairings(data, pair_variables):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1413, "name": "_infer_orient", "kind": "ref", "category": "function", "info": " orient = layer[\"orient\"] or mark._infer_orient(scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1415, "name": "get_order", "kind": "def", "category": "function", "info": " def get_order(var):\n # Ignore order for x/y: they have been scaled to numeric indices,\n # so any original order is no longer valid. Default ordering rules\n # sorted unique numbers will correctly reconstruct intended order\n # TODO This is tricky, make sure we add some tests for this\n if var not in \"xy\" and var in scales:\n return getattr(scales[var], \"order\", None)\n\n if orient in df:\n width = pd.Series(index=df.index, dtype=float)\n for view in subplots:\n view_idx = self._get_subplot_data(\n df, orient, view, p._shares.get(orient)\n ).index\n view_df = df.loc[view_idx]\n if \"width\" in mark._mappable_props:\n view_width = mark._resolve(view_df, \"width\", None)\n elif \"width\" in df:\n view_width = view_df[\"width\"]\n else:\n view_width = 0.8 # TODO what default?\n spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n width.loc[view_idx] = view_width * spacing\n df[\"width\"] = width\n\n if \"baseline\" in mark._mappable_props:\n # TODO what marks should have this?\n # If we can set baseline with, e.g., Bar(), then the\n # \"other\" (e.g. y for x oriented bars) parameterization\n # is somewhat ambiguous.\n baseline = mark._resolve(df, \"baseline\", None)\n else:\n # TODO unlike width, we might not want to add baseline to data\n # if the mark doesn't use it. 
Practically, there is a concern about\n # Mark abstraction like Area / Ribbon\n baseline = 0 if \"baseline\" not in df else df[\"baseline\"]\n df[\"baseline\"] = baseline\n\n if move is not None:\n moves = move if isinstance(move, list) else [move]\n for move_step in moves:\n move_by = getattr(move_step, \"by\", None)\n if move_by is None:\n move_by = grouping_properties\n move_groupers = [*move_by, *default_grouping_vars]\n if move_step.group_by_orient:\n move_groupers.insert(0, orient)\n order = {var: get_order(var) for var in move_groupers}\n groupby = GroupBy(order)\n df = move_step(df, groupby, orient, scales)\n\n df = self._unscale_coords(subplots, df, orient)\n\n grouping_vars = mark._grouping_props + default_grouping_vars\n split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n\n mark._plot(split_generator, scales, orient)\n\n # TODO is this the right place for this?\n for view in self._subplots:\n view[\"ax\"].autoscale_view()\n\n if layer[\"legend\"]:\n self._update_legend_contents(p, mark, data, scales)\n\n def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1426, "name": "_get_subplot_data", "kind": "ref", "category": "function", "info": " view_idx = self._get_subplot_data(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1431, "name": "_resolve", "kind": "ref", "category": "function", "info": " view_width = mark._resolve(view_df, \"width\", None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1436, "name": "_spacing", "kind": "ref", "category": "function", "info": " spacing = scales[orient]._spacing(view_df.loc[view_idx, orient])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1445, "name": "_resolve", "kind": "ref", "category": "function", "info": " baseline = mark._resolve(df, \"baseline\", None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1462, "name": "get_order", "kind": "ref", "category": "function", "info": " order = {var: get_order(var) for var in move_groupers}\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1463, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1464, "name": "move_step", "kind": "ref", "category": "function", "info": " df = move_step(df, groupby, orient, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1466, "name": "_unscale_coords", "kind": "ref", "category": "function", "info": " df = self._unscale_coords(subplots, df, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1469, "name": "_setup_split_generator", "kind": "ref", "category": "function", "info": " split_generator = self._setup_split_generator(grouping_vars, df, subplots)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1471, 
"name": "_plot", "kind": "ref", "category": "function", "info": " mark._plot(split_generator, scales, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1478, "name": "_update_legend_contents", "kind": "ref", "category": "function", "info": " self._update_legend_contents(p, mark, data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1480, "name": "_unscale_coords", "kind": "def", "category": "function", "info": " def _unscale_coords(\n self, subplots: list[dict], df: DataFrame, orient: str,\n ) -> DataFrame:\n # TODO do we still have numbers in the variable name at this point?\n coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n out_df = (\n df\n .drop(coord_cols, axis=1)\n .reindex(df.columns, axis=1) # So unscaled columns retain their place\n .copy(deep=False)\n )\n\n for view in subplots:\n view_df = self._filter_subplot_data(df, view)\n axes_df = view_df[coord_cols]\n for var, values in axes_df.items():\n\n axis = getattr(view[\"ax\"], f\"{str(var)[0]}axis\")\n # TODO see https://github.com/matplotlib/matplotlib/issues/22713\n transform = axis.get_transform().inverted().transform\n inverted = transform(values)\n out_df.loc[values.index, str(var)] = inverted\n\n return out_df\n\n def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1484, "name": "match", "kind": "ref", "category": "function", "info": " coord_cols = [c for c in df if re.match(r\"^[xy]\\D*$\", str(c))]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1493, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " view_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1499, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = axis.get_transform().inverted().transform\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1505, "name": "_generate_pairings", "kind": "def", "category": "function", "info": " def _generate_pairings(\n self, data: PlotData, pair_variables: dict,\n ) -> Generator[\n tuple[list[dict], DataFrame, dict[str, Scale]], None, None\n ]:\n # TODO retype return with subplot_spec or similar\n\n iter_axes = itertools.product(*[\n pair_variables.get(axis, [axis]) for axis in \"xy\"\n ])\n\n for x, y in iter_axes:\n\n subplots = []\n for view in self._subplots:\n if (view[\"x\"] == x) and (view[\"y\"] == y):\n subplots.append(view)\n\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n elif not pair_variables:\n out_df = data.frame.copy()\n else:\n if data.frame.empty and data.frames:\n out_df = data.frames[(x, y)].copy()\n else:\n out_df = data.frame.copy()\n\n scales = self._scales.copy()\n if x in out_df:\n scales[\"x\"] = self._scales[x]\n if y in out_df:\n scales[\"y\"] = self._scales[y]\n\n for axis, var in zip(\"xy\", (x, y)):\n if axis != var:\n out_df = out_df.rename(columns={var: axis})\n cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n out_df = out_df.drop(cols, axis=1)\n\n yield subplots, out_df, scales\n\n def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1542, "name": "match", "kind": "ref", "category": "function", "info": " cols = [col for col in out_df if re.match(rf\"{axis}\\d+\", str(col))]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1547, "name": "_get_subplot_index", "kind": "def", "category": "function", "info": " def _get_subplot_index(self, df: DataFrame, subplot: dict) -> Index:\n\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df.index\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df.index[keep_rows]\n\n def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) 
masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n 
if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1558, "name": "_filter_subplot_data", "kind": "def", "category": "function", "info": " def _filter_subplot_data(self, df: DataFrame, subplot: dict) -> DataFrame:\n # TODO note redundancies with preceding function ... 
needs refactoring\n dims = df.columns.intersection([\"col\", \"row\"])\n if dims.empty:\n return df\n\n keep_rows = pd.Series(True, df.index, dtype=bool)\n for dim in dims:\n keep_rows &= df[dim] == subplot[dim]\n return df[keep_rows]\n\n def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1569, "name": "_setup_split_generator", "kind": "def", "category": "function", "info": " def _setup_split_generator(\n self, grouping_vars: list[str], df: DataFrame, subplots: list[dict[str, Any]],\n ) -> Callable[[], Generator]:\n\n grouping_keys = []\n grouping_vars = [\n v for v in grouping_vars if v in df and v not in [\"col\", \"row\"]\n ]\n for var in grouping_vars:\n order = getattr(self._scales[var], \"order\", None)\n if order is None:\n order = categorical_order(df[var])\n grouping_keys.append(order)\n\n def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1580, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = categorical_order(df[var])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1583, "name": "split_generator", "kind": "def", "category": "function", "info": " def split_generator(keep_na=False) -> Generator:\n\n for view in subplots:\n\n axes_df = self._filter_subplot_data(df, view)\n\n with pd.option_context(\"mode.use_inf_as_na\", True):\n if keep_na:\n # The simpler thing to do would be x.dropna().reindex(x.index).\n # But that doesn't work with the way that the subset iteration\n # is written below, which assumes data for grouping vars.\n # Matplotlib (usually?) masks nan data, so this should \"work\".\n # Downstream code can also drop these rows, at some speed cost.\n present = axes_df.notna().all(axis=1)\n nulled = {}\n for axis in \"xy\":\n if axis in axes_df:\n nulled[axis] = axes_df[axis].where(present)\n axes_df = axes_df.assign(**nulled)\n else:\n axes_df = axes_df.dropna()\n\n subplot_keys = {}\n for dim in [\"col\", \"row\"]:\n if view[dim] is not None:\n subplot_keys[dim] = view[dim]\n\n if not grouping_vars or not any(grouping_keys):\n if not axes_df.empty:\n yield subplot_keys, axes_df.copy(), view[\"ax\"]\n continue\n\n grouped_df = axes_df.groupby(grouping_vars, sort=False, as_index=False)\n\n for key in itertools.product(*grouping_keys):\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n df_subset = grouped_df.get_group(pd_key)\n except KeyError:\n # TODO (from initial work on categorical plots refactor)\n # We are adding this to allow backwards compatability\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n df_subset = axes_df.loc[[]]\n\n if df_subset.empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n sub_vars.update(subplot_keys)\n\n # TODO need copy(deep=...) 
policy (here, above, anywhere else?)\n yield sub_vars, df_subset.copy(), view[\"ax\"]\n\n return split_generator\n\n def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, 
f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1587, "name": "_filter_subplot_data", "kind": "ref", "category": "function", "info": " axes_df = self._filter_subplot_data(df, view)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1623, "name": "get_group", "kind": "ref", "category": "function", "info": " df_subset = grouped_df.get_group(pd_key)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1643, "name": "_update_legend_contents", "kind": "def", "category": "function", "info": " def _update_legend_contents(\n self,\n p: Plot,\n mark: Mark,\n data: PlotData,\n scales: dict[str, Scale],\n ) -> None:\n \"\"\"Add legend artists / labels for one layer in the plot.\"\"\"\n if data.frame.empty and data.frames:\n legend_vars: list[str] = []\n for frame in data.frames.values():\n frame_vars = frame.columns.intersection(list(scales))\n legend_vars.extend(v for v in frame_vars if v not in legend_vars)\n else:\n legend_vars = list(data.frame.columns.intersection(list(scales)))\n\n # First pass: Identify the values that will be shown for each variable\n schema: list[tuple[\n tuple[str, str | int], list[str], tuple[list, list[str]]\n ]] = []\n schema = []\n for var in legend_vars:\n var_legend = scales[var]._legend\n if var_legend is not None:\n values, labels = var_legend\n for (_, part_id), part_vars, _ in schema:\n if data.ids[var] == part_id:\n # Allow multiple plot semantics to represent same data variable\n part_vars.append(var)\n break\n else:\n title = self._resolve_label(p, var, data.names[var])\n entry = (title, data.ids[var]), [var], (values, labels)\n schema.append(entry)\n\n # Second pass, generate an artist corresponding to each value\n contents: list[tuple[tuple[str, str | int], Any, list[str]]] = []\n for key, variables, (values, labels) in schema:\n artists = []\n for val in values:\n artist = mark._legend_artist(variables, val, scales)\n if artist is not None:\n artists.append(artist)\n if artists:\n contents.append((key, artists, labels))\n\n self._legend_contents.extend(contents)\n\n def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an entry for each distinct variable in each layer\n # Output dict has an entry for each 
distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1674, "name": "_resolve_label", "kind": "ref", "category": "function", "info": " title = self._resolve_label(p, var, data.names[var])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1683, "name": "_legend_artist", "kind": "ref", "category": "function", "info": " artist = mark._legend_artist(variables, val, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1691, "name": "_make_legend", "kind": "def", "category": "function", "info": " def _make_legend(self, p: Plot) -> None:\n \"\"\"Create the legend artist(s) and add onto the figure.\"\"\"\n # Combine artists representing same information across layers\n # Input list has an 
entry for each distinct variable in each layer\n # Output dict has an entry for each distinct variable\n merged_contents: dict[\n tuple[str, str | int], tuple[list[Artist], list[str]],\n ] = {}\n for key, new_artists, labels in self._legend_contents:\n # Key is (name, id); we need the id to resolve variable uniqueness,\n # but will need the name in the next step to title the legend\n if key in merged_contents:\n # Copy so inplace updates don't propagate back to legend_contents\n existing_artists = merged_contents[key][0]\n for i, artist in enumerate(existing_artists):\n # Matplotlib accepts a tuple of artists and will overlay them\n if isinstance(artist, tuple):\n artist += new_artists[i],\n else:\n existing_artists[i] = artist, new_artists[i]\n else:\n merged_contents[key] = new_artists.copy(), labels\n\n # TODO explain\n loc = \"center right\" if self._pyplot else \"center left\"\n\n base_legend = None\n for (name, _), (handles, labels) in merged_contents.items():\n\n legend = mpl.legend.Legend(\n self._figure,\n handles,\n labels,\n title=name,\n loc=loc,\n bbox_to_anchor=(.98, .55),\n )\n\n if base_legend:\n # Matplotlib has no public API for this so it is a bit of a hack.\n # Ideally we'd define our own legend class with more flexibility,\n # but that is a lot of work!\n base_legend_box = base_legend.get_children()[0]\n this_legend_box = legend.get_children()[0]\n base_legend_box.get_children().extend(this_legend_box.get_children())\n else:\n base_legend = legend\n self._figure.legends.append(legend)\n\n def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1720, "name": "Legend", "kind": "ref", "category": "function", "info": " legend = mpl.legend.Legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1740, "name": "_finalize_figure", "kind": "def", "category": "function", "info": " def _finalize_figure(self, p: Plot) -> None:\n\n for sub in self._subplots:\n ax = sub[\"ax\"]\n for axis in \"xy\":\n axis_key = sub[axis]\n axis_obj = getattr(ax, f\"{axis}axis\")\n\n # Axis limits\n if axis_key in p._limits:\n convert_units = getattr(ax, f\"{axis}axis\").convert_units\n a, b = p._limits[axis_key]\n lo = a if a is None else convert_units(a)\n hi = b if b is None else convert_units(b)\n if 
isinstance(a, str):\n lo = cast(float, lo) - 0.5\n if isinstance(b, str):\n hi = cast(float, hi) + 0.5\n ax.set(**{f\"{axis}lim\": (lo, hi)})\n\n if axis_key in self._scales: # TODO when would it not be?\n self._scales[axis_key]._finalize(p, axis_obj)\n\n if (engine := p._layout_spec.get(\"engine\", default)) is not default:\n # None is a valid arg for Figure.set_layout_engine, hence `default`\n set_layout_engine(self._figure, engine)\n elif p._target is None:\n # Don't modify the layout engine if the user supplied their own\n # matplotlib figure and didn't specify an engine through Plot\n # TODO switch default to \"constrained\"?\n # TODO either way, make configurable\n set_layout_engine(self._figure, \"tight\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1752, "name": "convert_units", "kind": "ref", "category": "function", "info": " lo = a if a is None else convert_units(a)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1753, "name": "convert_units", "kind": "ref", "category": "function", "info": " hi = b if b is None else convert_units(b)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/plot.py", "rel_fname": "seaborn/_core/plot.py", "line": 1761, "name": "_finalize", "kind": "ref", "category": "function", "info": " self._scales[axis_key]._finalize(p, axis_obj)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 48, "name": "Property", "kind": "def", "category": "class", "info": "__init__\tdefault_scale\tinfer_scale\tget_mapping\tstandardize\t_check_dict_entries\t_check_list_length"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 63, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Scale:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n if var_type == \"numeric\":\n return Continuous()\n elif var_type == \"datetime\":\n return Temporal()\n elif var_type == \"boolean\":\n return Boolean()\n else:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? 
That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 66, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 68, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 70, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 72, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 74, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 76, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up 
if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 87, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(trans=arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 96, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > 
len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 102, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 106, "name": "_check_dict_entries", "kind": "def", "category": "function", "info": " def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return 
values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 114, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 144, "name": "Coordinate", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 155, "name": "IntervalProperty", "kind": "def", "category": "class", "info": "default_range\t_forward\t_inverse\tinfer_scale\tget_mapping\t_get_nominal_mapping\t_get_boolean_mapping\t_get_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 163, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n elif isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif var_type == \"categorical\":\n return Nominal(arg)\n elif var_type == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = 
scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 167, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n elif isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif var_type == \"categorical\":\n return Nominal(arg)\n elif var_type == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if 
isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 171, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n elif isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif var_type == \"categorical\":\n return Nominal(arg)\n elif var_type == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n 
actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 175, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? 
That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 180, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 183, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 185, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 187, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 189, "name": "Temporal", "kind": "ref", "category": "function", "info": " return Temporal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 192, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 194, "name": "get_mapping", "kind": "def", 
"category": "function", "info": " def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 197, "name": "_get_nominal_mapping", "kind": "ref", "category": "function", "info": " return self._get_nominal_mapping(scale, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 199, "name": "_get_boolean_mapping", "kind": "ref", "category": "function", "info": " return self._get_boolean_mapping(scale, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 202, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(self.default_range)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 204, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward(scale.values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 217, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = 
self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 218, "name": "_inverse", "kind": "ref", "category": "function", "info": " return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 222, "name": "_get_nominal_mapping", "kind": "def", "category": "function", "info": " def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 224, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 225, "name": "_get_values", "kind": "ref", "category": "function", "info": " values = self._get_values(scale, levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 227, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 236, "name": "_get_boolean_mapping", "kind": "def", "category": "function", "info": " def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, 
vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 238, "name": "_get_values", "kind": "ref", "category": "function", "info": " values = self._get_values(scale, [True, False])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 240, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 248, "name": "_get_values", "kind": "def", "category": "function", "info": " def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 251, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 254, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 268, "name": "_forward", "kind": "ref", "category": "function", "info": " vmin, vmax = self._forward([vmin, vmax])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 269, "name": "_inverse", "kind": "ref", "category": "function", "info": " values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 274, "name": "PointSize", "kind": "def", "category": "class", "info": "_forward\t_inverse"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 278, "name": "_forward", "kind": "def", "category": "function", "info": " def _forward(self, values):\n \"\"\"Square native values to implement linear scaling of point area.\"\"\"\n return np.square(values)\n\n def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 282, "name": "_inverse", "kind": "def", "category": "function", "info": " def _inverse(self, values):\n \"\"\"Invert areal values back to point diameter.\"\"\"\n return np.sqrt(values)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 287, "name": "LineWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 290, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n elif isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif var_type == 
\"categorical\":\n return Nominal(arg)\n elif var_type == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 296, "name": "EdgeWidth", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 299, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> 
ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n elif isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif var_type == \"categorical\":\n return Nominal(arg)\n elif var_type == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 305, "name": "Stroke", "kind": "def", "category": "class", "info": ""}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 310, "name": "Alpha", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 316, "name": "Offset", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 322, "name": "FontSize", "kind": "def", "category": "class", "info": "default_range"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 327, "name": "default_range", "kind": "def", "category": "function", "info": " def default_range(self) -> tuple[float, float]:\n \"\"\"Min and max values used by default for semantic mapping.\"\"\"\n return self._default_range\n\n def _forward(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to native values before linear mapping into interval.\"\"\"\n return values\n\n def _inverse(self, values: ArrayLike) -> ArrayLike:\n \"\"\"Transform applied to results of mapping that returns to native values.\"\"\"\n return values\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n\n # TODO infer continuous based on log/sqrt etc?\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n elif isinstance(arg, (list, dict)):\n return Nominal(arg)\n elif var_type == \"categorical\":\n return Nominal(arg)\n elif var_type == \"datetime\":\n return Temporal(arg)\n # TODO other variable types\n else:\n return Continuous(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n vmin, vmax = self._forward(self.default_range)\n elif isinstance(scale.values, tuple) and len(scale.values) == 2:\n vmin, vmax = self._forward(scale.values)\n else:\n if isinstance(scale.values, tuple):\n actual = f\"{len(scale.values)}-tuple\"\n else:\n actual = str(type(scale.values))\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be 2-tuple; not {actual}.\",\n ])\n raise TypeError(err)\n\n def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = 
np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 338, "name": "ObjectProperty", "kind": "def", "category": "class", "info": "_default_values\tdefault_scale\tinfer_scale\tget_mapping\t_get_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 347, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean() if var_type == \"boolean\" else Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, \"order\", [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 350, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Scale:\n \"\"\"Given data, initialize appropriate scale 
class.\"\"\"\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n if var_type == \"numeric\":\n return Continuous()\n elif var_type == \"datetime\":\n return Temporal()\n elif var_type == \"boolean\":\n return Boolean()\n else:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 351, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 352, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean() if var_type == \"boolean\" else Nominal()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 352, "name": "Nominal", "kind": "ref", "category": "function", 
"info": " return Boolean() if var_type == \"boolean\" else Nominal()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 354, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 355, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 356, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 356, "name": "Nominal", "kind": "ref", 
"category": "function", "info": " return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 358, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 362, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 363, "name": "_get_values", "kind": "ref", "category": "function", "info": " values = self._get_values(scale, levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 368, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values 
and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 377, "name": "_get_values", "kind": "def", "category": "function", "info": " def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 381, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, scale.values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 384, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " values = self._check_list_length(levels, scale.values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 386, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(n)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 394, "name": "standardize", "kind": "ref", "category": "function", "info": " values = [self.standardize(x) for x in values]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 398, "name": "Marker", "kind": "def", "category": "class", "info": "standardize\t_default_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 400, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " null_value = MarkerStyle(\"\")\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 407, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: MarkerPattern) -> MarkerStyle:\n return MarkerStyle(val)\n\n def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 408, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " return MarkerStyle(val)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 410, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[MarkerStyle]:\n \"\"\"Build an arbitrarily long list of unique marker styles.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\", \"X\", (4, 0, 45), \"P\", (4, 0, 0), (4, 1, 0), \"^\", (4, 1, 45), \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([(s + 1, 1, a), (s + 1, 0, a), (s, 1, 0), (s, 0, 0)])\n s += 1\n\n markers = [MarkerStyle(m) for m in markers[:n]]\n\n return markers\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 437, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " markers = [MarkerStyle(m) for m in markers[:n]]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 442, "name": "LineStyle", "kind": "def", "category": "class", "info": "standardize\t_default_values\t_get_dash_pattern"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 446, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: str | DashPattern) -> DashPatternWithOffset:\n return self._get_dash_pattern(val)\n\n def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n 
dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 447, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return self._get_dash_pattern(val)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 449, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list[DashPatternWithOffset]:\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes: list[str | DashPattern] = [\n \"-\", (4, 1.5), (1, 1), (3, 1.25, 1.5, 1.25), (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(list(a)[1:-1][::-1], list(b)[1:-1]))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return [self._get_dash_pattern(x) for x in dashes]\n\n @staticmethod\n def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), *ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 490, "name": "_get_dash_pattern", "kind": "ref", "category": "function", "info": " return [self._get_dash_pattern(x) for x in dashes]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 493, "name": "_get_dash_pattern", "kind": "def", "category": "function", "info": " def _get_dash_pattern(style: str | DashPattern) -> DashPatternWithOffset:\n \"\"\"Convert linestyle arguments to dash pattern with offset.\"\"\"\n # Copied and modified from Matplotlib 3.4\n # go from short hand -> full strings\n ls_mapper = {\"-\": \"solid\", \"--\": \"dashed\", \"-.\": \"dashdot\", \":\": \"dotted\"}\n if isinstance(style, str):\n style = ls_mapper.get(style, style)\n # un-dashed styles\n if style in [\"solid\", \"none\", \"None\"]:\n offset = 0\n dashes = None\n # dashed styles\n elif style in [\"dashed\", \"dashdot\", \"dotted\"]:\n offset = 0\n dashes = tuple(mpl.rcParams[f\"lines.{style}_pattern\"])\n else:\n options = [*ls_mapper.values(), 
*ls_mapper.keys()]\n msg = f\"Linestyle string must be one of {options}, not {repr(style)}.\"\n raise ValueError(msg)\n\n elif isinstance(style, tuple):\n if len(style) > 1 and isinstance(style[1], tuple):\n offset, dashes = style\n elif len(style) > 1 and style[1] is None:\n offset, dashes = style\n else:\n offset = 0\n dashes = style\n else:\n val_type = type(style).__name__\n msg = f\"Linestyle must be str or tuple, not {val_type}.\"\n raise TypeError(msg)\n\n # Normalize offset to be positive and shorter than the dash cycle\n if dashes is not None:\n try:\n dsum = sum(dashes)\n except TypeError as err:\n msg = f\"Invalid dash pattern: {dashes}\"\n raise TypeError(msg) from err\n if dsum:\n offset %= dsum\n\n return offset, dashes\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 539, "name": "TextAlignment", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 543, "name": "HorizontalAlignment", "kind": "def", "category": "class", "info": "_default_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 545, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean() if var_type == \"boolean\" else Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, \"order\", [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 550, "name": "VerticalAlignment", "kind": "def", "category": "class", "info": "_default_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": 
"seaborn/_core/properties.py", "line": 552, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean() if var_type == \"boolean\" else Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Define mapping as lookup into list of object values.\"\"\"\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, \"order\", [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 562, "name": "Color", "kind": "def", "category": "class", "info": "standardize\t_standardize_color_sequence\tinfer_scale\tget_mapping\t_get_nominal_mapping\t_get_boolean_mapping\t_get_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 567, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: ColorSpec) -> RGBTuple | RGBATuple:\n # Return color with alpha channel only if the input spec has it\n # This is so that RGBA colors can override the Alpha property\n if to_rgba(val) != to_rgba(val, 1):\n return to_rgba(val)\n else:\n return to_rgb(val)\n\n def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. 
boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n\n levels = categorical_order(data, scale.order)\n colors = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n\n colors = self._get_values(scale, [True, False])\n\n def mapping(x):\n\n use = np.isfinite(x)\n x = np.asarray(x).astype(bool)\n out = np.full((len(x), colors.shape[1]), np.nan)\n out[x & use] = colors[0]\n out[~x & use] = colors[1]\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> ArrayLike:\n \"\"\"Validate scale.values and identify a value for each 
level.\"\"\"\n n = len(levels)\n values = scale.values\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n return self._standardize_color_sequence(colors)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 575, "name": "_standardize_color_sequence", "kind": "def", "category": "function", "info": " def _standardize_color_sequence(self, colors: ArrayLike) -> ArrayLike:\n \"\"\"Convert color sequence to RGB(A) array, preserving but not adding alpha.\"\"\"\n def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. 
for semantics?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n\n levels = categorical_order(data, scale.order)\n colors = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n\n colors = self._get_values(scale, [True, False])\n\n def mapping(x):\n\n use = np.isfinite(x)\n x = np.asarray(x).astype(bool)\n out = np.full((len(x), colors.shape[1]), np.nan)\n out[x & use] = colors[0]\n out[~x & use] = colors[1]\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> ArrayLike:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n values = scale.values\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for 
{self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n return self._standardize_color_sequence(colors)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 577, "name": "has_alpha", "kind": "def", "category": "function", "info": " def has_alpha(x):\n return to_rgba(x) != to_rgba(x, 1)\n\n if isinstance(colors, np.ndarray):\n needs_alpha = colors.shape[1] == 4\n else:\n needs_alpha = any(has_alpha(x) for x in colors)\n\n if needs_alpha:\n return to_rgba_array(colors)\n else:\n return to_rgba_array(colors)[:, :3]\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n # TODO when inferring Continuous without data, verify type\n\n # TODO need to rethink the variable type system\n # (e.g. boolean, ordered categories as Ordinal, etc)..\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n\n if var_type == \"boolean\":\n return Boolean(arg)\n\n if isinstance(arg, (dict, list)):\n return Nominal(arg)\n\n if isinstance(arg, tuple):\n if var_type == \"categorical\":\n # TODO It seems reasonable to allow a gradient mapping for nominal\n # scale but it also feels \"technically\" wrong. Should this infer\n # Ordinal with categorical data and, if so, verify orderedness?\n return Nominal(arg)\n return Continuous(arg)\n\n if callable(arg):\n return Continuous(arg)\n\n # TODO Do we accept str like \"log\", \"pow\", etc. for semantics?\n\n if not isinstance(arg, str):\n msg = \" \".join([\n f\"A single scale argument for {self.variable} variables must be\",\n f\"a string, dict, tuple, list, or callable, not {type(arg)}.\"\n ])\n raise TypeError(msg)\n\n if arg in QUAL_PALETTES:\n return Nominal(arg)\n elif var_type == \"numeric\":\n return Continuous(arg)\n # TODO implement scales for date variables and any others.\n else:\n return Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to color values.\"\"\"\n # TODO what is best way to do this conditional?\n # Should it be class-based or should classes have behavioral attributes?\n if isinstance(scale, Nominal):\n return self._get_nominal_mapping(scale, data)\n elif isinstance(scale, Boolean):\n return self._get_boolean_mapping(scale, data)\n\n if scale.values is None:\n # TODO Rethink best default continuous color gradient\n mapping = color_palette(\"ch:\", as_cmap=True)\n elif isinstance(scale.values, tuple):\n # TODO blend_palette will strip alpha, but we should support\n # interpolation on all four channels\n mapping = blend_palette(scale.values, as_cmap=True)\n elif isinstance(scale.values, str):\n # TODO for matplotlib colormaps this will clip extremes, which is\n # different from what using the named colormap directly would do\n # This may or may not be desireable.\n mapping = color_palette(scale.values, as_cmap=True)\n elif callable(scale.values):\n mapping = scale.values\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, tuple, or callable; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n 
out[invalid] = np.nan\n return out\n\n return _mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n\n levels = categorical_order(data, scale.order)\n colors = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n\n colors = self._get_values(scale, [True, False])\n\n def mapping(x):\n\n use = np.isfinite(x)\n x = np.asarray(x).astype(bool)\n out = np.full((len(x), colors.shape[1]), np.nan)\n out[x & use] = colors[0]\n out[~x & use] = colors[1]\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> ArrayLike:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n values = scale.values\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n return self._standardize_color_sequence(colors)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 583, "name": "has_alpha", "kind": "ref", "category": "function", "info": " needs_alpha = any(has_alpha(x) for x in colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 590, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? 
That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 595, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 598, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 601, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 608, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 609, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 612, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 624, "name": "Nominal", "kind": "ref", 
"category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 626, "name": "Continuous", "kind": "ref", "category": "function", "info": " return Continuous(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 629, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 631, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 636, "name": "_get_nominal_mapping", "kind": "ref", "category": "function", "info": " return self._get_nominal_mapping(scale, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 638, "name": "_get_boolean_mapping", "kind": "ref", "category": "function", "info": " return self._get_boolean_mapping(scale, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 642, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(\"ch:\", as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 646, "name": "blend_palette", "kind": "ref", "category": "function", "info": " mapping = blend_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", 
"line": 651, "name": "color_palette", "kind": "ref", "category": "function", "info": " mapping = color_palette(scale.values, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 662, "name": "_mapping", "kind": "def", "category": "function", "info": " def _mapping(x):\n # Remove alpha channel so it does not override alpha property downstream\n # TODO this will need to be more flexible to support RGBA tuples (see above)\n invalid = ~np.isfinite(x)\n out = mapping(x)[:, :3]\n out[invalid] = np.nan\n return out\n\n return _mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n\n levels = categorical_order(data, scale.order)\n colors = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n use = np.isfinite(x)\n out = np.full((len(ixs), colors.shape[1]), np.nan)\n out[use] = np.take(colors, ixs[use], axis=0)\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n\n colors = self._get_values(scale, [True, False])\n\n def mapping(x):\n\n use = np.isfinite(x)\n x = np.asarray(x).astype(bool)\n out = np.full((len(x), colors.shape[1]), np.nan)\n out[x & use] = colors[0]\n out[~x & use] = colors[1]\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> ArrayLike:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n values = scale.values\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n return self._standardize_color_sequence(colors)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 666, "name": "mapping", "kind": "ref", "category": "function", "info": " out = mapping(x)[:, :3]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 672, "name": "_get_nominal_mapping", "kind": "def", "category": "function", "info": " def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = 
np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 674, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, scale.order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 675, "name": "_get_values", "kind": "ref", "category": "function", "info": " colors = self._get_values(scale, levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 677, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 686, "name": "_get_boolean_mapping", "kind": "def", "category": "function", "info": " def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 688, "name": "_get_values", "kind": "ref", "category": "function", "info": " colors = self._get_values(scale, [True, False])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 690, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise 
TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 701, "name": "_get_values", "kind": "def", "category": "function", "info": " def _get_values(self, scale: Scale, levels: list) -> ArrayLike:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n values = scale.values\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n return self._standardize_color_sequence(colors)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 706, "name": "_check_dict_entries", "kind": "ref", "category": "function", "info": " self._check_dict_entries(levels, values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 709, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, values)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 711, "name": "blend_palette", "kind": "ref", "category": "function", "info": " colors = blend_palette(values, n)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 713, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(values, n)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 715, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n <= len(get_color_cycle()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 717, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 719, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 728, "name": "_standardize_color_sequence", "kind": "ref", "category": "function", "info": " return 
self._standardize_color_sequence(colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 736, "name": "Fill", "kind": "def", "category": "class", "info": "default_scale\tinfer_scale\tstandardize\t_default_values\tget_mapping\t_get_values"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 741, "name": "default_scale", "kind": "def", "category": "function", "info": " def default_scale(self, data: Series) -> Scale:\n \"\"\"Given data, initialize appropriate scale class.\"\"\"\n\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n if var_type == \"numeric\":\n return Continuous()\n elif var_type == \"datetime\":\n return Temporal()\n elif var_type == \"boolean\":\n return Boolean()\n else:\n return Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": 
"seaborn/_core/properties.py", "line": 742, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 743, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean() if var_type == \"boolean\" else Nominal()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 743, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Boolean() if var_type == \"boolean\" else Nominal()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 745, "name": "infer_scale", "kind": "def", "category": "function", "info": " def infer_scale(self, arg: Any, data: Series) -> Scale:\n \"\"\"Given data and a scaling argument, initialize appropriate scale class.\"\"\"\n # TODO put these somewhere external for validation\n # TODO putting this here won't pick it up if subclasses define infer_scale\n # (e.g. color). How best to handle that? One option is to call super after\n # handling property-specific possibilities (e.g. for color check that the\n # arg is not a valid palette name) but that could get tricky.\n trans_args = [\"log\", \"symlog\", \"logit\", \"pow\", \"sqrt\"]\n if isinstance(arg, str):\n if any(arg.startswith(k) for k in trans_args):\n # TODO validate numeric type? That should happen centrally somewhere\n return Continuous(trans=arg)\n else:\n msg = f\"Unknown magic arg for {self.variable} scale: '{arg}'.\"\n raise ValueError(msg)\n else:\n arg_type = type(arg).__name__\n msg = f\"Magic arg for {self.variable} scale must be str, not {arg_type}.\"\n raise TypeError(msg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 746, "name": "variable_type", "kind": "ref", "category": "function", "info": " var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 747, "name": "Boolean", "kind": "ref", "category": "function", "info": " return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 747, "name": "Nominal", "kind": "ref", "category": "function", "info": " return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 749, "name": "standardize", "kind": "def", "category": "function", "info": " def standardize(self, val: Any) -> bool:\n return bool(val)\n\n def _default_values(self, n: int) -> list:\n \"\"\"Return a list of n values, alternating True and False.\"\"\"\n if n > 2:\n msg = \" \".join([\n f\"The variable assigned to {self.variable} has more than two levels,\",\n f\"so {self.variable} values will cycle and may be uninterpretable\",\n ])\n # TODO fire in a \"nice\" way (see above)\n warnings.warn(msg, UserWarning)\n return [x for x, _ in zip(itertools.cycle([True, False]), range(n))]\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps each data value to True or False.\"\"\"\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, \"order\", [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else False\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, list):\n values = [bool(x) for x in scale.values]\n elif isinstance(scale.values, dict):\n values = [bool(scale.values[x]) for x in levels]\n elif scale.values is None:\n values = self._default_values(len(levels))\n else:\n msg = \" \".join([\n f\"Scale values for {self.variable} must be passed in\",\n f\"a list or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 752, "name": "_default_values", "kind": "def", "category": "function", "info": " def _default_values(self, n: int) -> list:\n raise NotImplementedError()\n\n def default_scale(self, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean() if var_type == \"boolean\" else Nominal()\n\n def infer_scale(self, arg: Any, data: Series) -> Scale:\n var_type = variable_type(data, boolean_type=\"boolean\", strict_boolean=True)\n return Boolean(arg) if var_type == \"boolean\" else Nominal(arg)\n\n def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Define mapping as lookup into 
list of object values.\"\"\"\n boolean_scale = isinstance(scale, Boolean)\n order = getattr(scale, \"order\", [True, False] if boolean_scale else None)\n levels = categorical_order(data, order)\n values = self._get_values(scale, levels)\n\n if boolean_scale:\n values = values[::-1]\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n return [\n values[ix] if np.isfinite(x_i) else self.null_value\n for x_i, ix in zip(x, ixs)\n ]\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n n = len(levels)\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n elif scale.values is None:\n values = self._default_values(n)\n else:\n msg = \" \".join([\n f\"Scale values for a {self.variable} variable must be provided\",\n f\"in a dict or list; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n values = [self.standardize(x) for x in values]\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 763, "name": "get_mapping", "kind": "def", "category": "function", "info": " def get_mapping(self, scale: Scale, data: Series) -> Mapping:\n \"\"\"Return a function that maps from data domain to property range.\"\"\"\n def identity(x):\n return x\n return identity\n\n def standardize(self, val: Any) -> Any:\n \"\"\"Coerce flexible property value to standardized representation.\"\"\"\n return val\n\n def _check_dict_entries(self, levels: list, values: dict) -> None:\n \"\"\"Input check when values are provided as a dictionary.\"\"\"\n missing = set(levels) - set(values)\n if missing:\n formatted = \", \".join(map(repr, sorted(missing, key=str)))\n err = f\"No entry in {self.variable} dictionary for {formatted}\"\n raise ValueError(err)\n\n def _check_list_length(self, levels: list, values: list) -> list:\n \"\"\"Input check when values are provided as a list.\"\"\"\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {self.variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {self.variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n # TODO look into custom PlotSpecWarning with better formatting\n if message:\n warnings.warn(message, UserWarning)\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 767, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 768, "name": "_get_values", "kind": "ref", "category": "function", "info": " values = self._get_values(scale, levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", 
"line": 773, "name": "mapping", "kind": "def", "category": "function", "info": " def mapping(x):\n return self._inverse(np.multiply(x, vmax - vmin) + vmin)\n\n return mapping\n\n def _get_nominal_mapping(self, scale: Nominal, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n levels = categorical_order(data, scale.order)\n values = self._get_values(scale, levels)\n\n def mapping(x):\n ixs = np.asarray(x, np.intp)\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.take(values, ixs[use])\n return out\n\n return mapping\n\n def _get_boolean_mapping(self, scale: Boolean, data: Series) -> Mapping:\n \"\"\"Identify evenly-spaced values using interval or explicit mapping.\"\"\"\n values = self._get_values(scale, [True, False])\n\n def mapping(x):\n out = np.full(len(x), np.nan)\n use = np.isfinite(x)\n out[use] = np.where(x[use], *values)\n return out\n\n return mapping\n\n def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 782, "name": "_get_values", "kind": "def", "category": "function", "info": " def _get_values(self, scale: Scale, levels: list) -> list:\n \"\"\"Validate scale.values and identify a value for each level.\"\"\"\n if isinstance(scale.values, dict):\n self._check_dict_entries(levels, scale.values)\n values = [scale.values[x] for x in levels]\n elif isinstance(scale.values, list):\n values = self._check_list_length(levels, scale.values)\n else:\n if scale.values is None:\n vmin, vmax = self.default_range\n elif isinstance(scale.values, tuple):\n vmin, vmax = scale.values\n else:\n scale_class = scale.__class__.__name__\n err = \" \".join([\n f\"Values for {self.variable} variables with {scale_class} scale\",\n f\"must be a dict, list or tuple; not {type(scale.values)}\",\n ])\n raise TypeError(err)\n\n vmin, vmax = self._forward([vmin, vmax])\n values = list(self._inverse(np.linspace(vmax, vmin, len(levels))))\n\n return values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 789, "name": "_default_values", "kind": "ref", "category": "function", "info": " values = self._default_values(len(levels))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/properties.py", "rel_fname": "seaborn/_core/properties.py", "line": 838, "name": "cls", "kind": "ref", "category": "function", "info": "PROPERTIES = {var: cls(var) for var, cls in PROPERTY_CLASSES.items()}\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 16, "name": "VarType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 37, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(\n vector: Series,\n boolean_type: Literal[\"numeric\", \"categorical\", \"boolean\"] = \"numeric\",\n strict_boolean: bool = False,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 70, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 74, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 101, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(boolean_type)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 104, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 105, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"numeric\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 107, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 108, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 114, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VarType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 120, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 121, "name": "VarType", "kind": "ref", "category": "function", "info": " return 
VarType(\"numeric\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 125, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VarType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VarType(\"categorical\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 131, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 132, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"datetime\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 136, "name": "VarType", "kind": "ref", "category": "function", "info": " return VarType(\"categorical\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 139, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector: Series, order: list | None = None) -> list:\n \"\"\"\n Return a list of unique data values using seaborn's ordering rules.\n\n Parameters\n ----------\n vector : Series\n Vector of \"categorical\" values\n order : list\n Desired order of category levels to override the order determined\n from the `data` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is not None:\n return order\n\n if vector.dtype.name == \"category\":\n order = list(vector.cat.categories)\n else:\n order = list(filter(pd.notnull, vector.unique()))\n if variable_type(pd.Series(order)) == \"numeric\":\n order.sort()\n\n return order\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/rules.py", "rel_fname": "seaborn/_core/rules.py", "line": 164, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(pd.Series(order)) == \"numeric\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 54, "name": "Scale", "kind": "def", "category": "class", "info": "__post_init__\ttick\tlabel\t_get_locators\t_get_formatter\t_get_scale\t_spacing\t_setup\t_finalize\t__call__\t_identity"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 65, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n self._tick_params = None\n self._label_params = None\n self._legend = None\n\n def tick(self):\n raise NotImplementedError()\n\n def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n 
major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 71, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self):\n raise NotImplementedError()\n\n def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 74, "name": "label", "kind": "def", "category": "function", "info": " def label(self):\n raise NotImplementedError()\n\n def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 77, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self):\n raise NotImplementedError()\n\n def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 80, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator: Locator | None = None):\n raise NotImplementedError()\n\n def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 83, "name": "_get_scale", "kind": "def", "category": "function", "info": " def _get_scale(self, name: str, forward: Callable, inverse: Callable):\n\n major_locator, minor_locator = self._get_locators(**self._tick_params)\n major_formatter = self._get_formatter(major_locator, **self._label_params)\n\n class InternalScale(mpl.scale.FuncScale):\n def set_default_locators_and_formatters(self, axis):\n axis.set_major_locator(major_locator)\n if minor_locator is not None:\n axis.set_minor_locator(minor_locator)\n axis.set_major_formatter(major_formatter)\n\n return InternalScale(name, (forward, inverse))\n\n def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 85, "name": "_get_locators", "kind": "ref", "category": "function", "info": " major_locator, minor_locator = self._get_locators(**self._tick_params)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 86, "name": "_get_formatter", "kind": "ref", "category": "function", "info": " major_formatter = self._get_formatter(major_locator, **self._label_params)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 88, "name": "InternalScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 95, "name": "InternalScale", "kind": "ref", "category": "function", "info": " return InternalScale(name, (forward, inverse))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 97, "name": "_spacing", "kind": "def", "category": "function", "info": " def _spacing(self, x: Series) -> float:\n space = self._spacer(x)\n if np.isnan(space):\n # This happens when there is no variance in the orient coordinate data\n # Not exactly clear what the right default is, but 1 seems reasonable?\n return 1\n return space\n\n def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 98, "name": "_spacer", "kind": "ref", "category": "function", "info": " space = self._spacer(x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 105, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 110, "name": "_finalize", "kind": "def", "category": "function", "info": " def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 128, "name": "func", "kind": "ref", "category": "function", "info": " trans_data = func(trans_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 136, "name": "_identity", "kind": "def", "category": "function", "info": " def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 138, "name": "Identity", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 144, "name": "Identity", "kind": "ref", "category": "function", "info": " return Identity()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 148, "name": "Boolean", "kind": "def", "category": "class", "info": "_setup\t_finalize\ttick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 163, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. 
for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 169, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 171, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 173, "name": "na_safe_cast", "kind": "def", "category": "function", "info": " def na_safe_cast(x):\n # TODO this doesn't actually need to be a closure\n if np.isscalar(x):\n return float(bool(x))\n else:\n if hasattr(x, \"notna\"):\n # Handle pd.NA; np<>pd interop with NA is tricky\n use = x.notna().to_numpy()\n else:\n use = np.isfinite(x)\n out = np.full(len(x), np.nan, dtype=float)\n out[use] = x[use].astype(bool).astype(float)\n return out\n\n new._pipeline = [na_safe_cast, prop.get_mapping(new, data)]\n new._spacer = _default_spacer\n if prop.legend:\n new._legend = [True, False], [\"True\", \"False\"]\n\n forward, inverse = _make_identity_transforms()\n mpl_scale = new._get_scale(str(data.name), forward, inverse)\n\n axis = PseudoAxis(mpl_scale) if axis is None else axis\n mpl_scale.set_default_locators_and_formatters(axis)\n new._matplotlib_scale = mpl_scale\n\n return new\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n\n # We want values to appear in a True, False order but also want\n # True/False to be drawn at 1/0 positions respectively to avoid nasty\n # surprises if additional artists are added through the matplotlib API.\n # We accomplish this using axis inversion akin to what we do in Nominal.\n\n ax = axis.axes\n name = axis.axis_name\n axis.grid(False, which=\"both\")\n if name not in p._limits:\n nticks = len(axis.get_major_ticks())\n lo, hi = -.5, nticks - .5\n if name == \"x\":\n lo, hi = hi, lo\n set_lim = getattr(ax, f\"set_{name}lim\")\n set_lim(lo, hi, auto=None)\n\n def tick(self, locator: Locator | None = None):\n new = copy(self)\n new._tick_params = {\"locator\": locator}\n return new\n\n def label(self, formatter: Formatter | None = None):\n new = copy(self)\n new._label_params = {\"formatter\": formatter}\n return new\n\n def _get_locators(self, locator):\n if locator is not None:\n return locator\n return FixedLocator([0, 1]), None\n\n def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 187, "name": "get_mapping", "kind": "ref", "category": "function", "info": " new._pipeline = [na_safe_cast, prop.get_mapping(new, data)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": 
"seaborn/_core/scales.py", "line": 192, "name": "_make_identity_transforms", "kind": "ref", "category": "function", "info": " forward, inverse = _make_identity_transforms()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 193, "name": "_get_scale", "kind": "ref", "category": "function", "info": " mpl_scale = new._get_scale(str(data.name), forward, inverse)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 195, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale) if axis is None else axis\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 201, "name": "_finalize", "kind": "def", "category": "function", "info": " def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 217, "name": "set_lim", "kind": "ref", "category": "function", "info": " set_lim(lo, hi, auto=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 219, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self, locator: Locator | None = None):\n new = copy(self)\n new._tick_params = {\"locator\": locator}\n return new\n\n def label(self, formatter: Formatter | None = None):\n new = copy(self)\n new._label_params = {\"formatter\": formatter}\n return new\n\n def _get_locators(self, locator):\n if locator is not None:\n return locator\n return FixedLocator([0, 1]), None\n\n def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 224, "name": "label", "kind": "def", "category": "function", "info": " def label(self, formatter: Formatter | None = None):\n new = copy(self)\n new._label_params = {\"formatter\": formatter}\n return new\n\n def _get_locators(self, locator):\n if locator is not None:\n return locator\n return FixedLocator([0, 1]), None\n\n def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 229, "name": "_get_locators", "kind": "def", "category": "function", "info": " def 
_get_locators(self, locator):\n if locator is not None:\n return locator\n return FixedLocator([0, 1]), None\n\n def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 234, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 241, "name": "Nominal", "kind": "def", "category": "class", "info": "_setup\t_finalize\ttick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 252, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 258, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 260, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 265, "name": "categorical_order", "kind": "ref", "category": "function", "info": " units_seed = categorical_order(data, new.order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 279, "name": "CatScale", "kind": "def", "category": "class", "info": "set_default_locators_and_formatters"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 289, "name": "CatScale", "kind": "ref", "category": "function", "info": " mpl_scale = CatScale(data.name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 291, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 303, "name": "stringify", "kind": "ref", "category": "function", "info": " axis.update_units(stringify(np.array(units_seed)))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 314, "name": "stringify", "kind": "ref", "category": "function", "info": " out[keep] = axis.convert_units(stringify(x[keep]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 317, "name": "get_mapping", "kind": "ref", "category": "function", "info": " new._pipeline = [convert_units, prop.get_mapping(new, data)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 321, "name": "stringify", "kind": "ref", "category": "function", "info": " new._legend = units_seed, list(stringify(units_seed))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 325, "name": "_finalize", "kind": "def", "category": "function", "info": " def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 336, "name": "set_lim", "kind": "ref", "category": "function", "info": " set_lim(lo, hi, auto=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 338, "name": "tick", "kind": "def", "category": "function", "info": " def tick(self, locator: Locator | None = None) -> Nominal:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n\n Returns\n -------\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._tick_params = {\"locator\": locator}\n return new\n\n def label(self, formatter: Formatter | None = None) -> Nominal:\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. 
note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\"formatter\": formatter}\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 360, "name": "label", "kind": "def", "category": "function", "info": " def label(self, formatter: Formatter | None = None) -> Nominal:\n \"\"\"\n Configure the selection of labels for the scale's axis or legend.\n\n .. note::\n This API is under construction and will be enhanced over time.\n At the moment, it is probably not very useful.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured matplotlib formatter; other parameters will not be used.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n new = copy(self)\n new._label_params = {\"formatter\": formatter}\n return new\n\n def _get_locators(self, locator):\n\n if locator is not None:\n return locator, None\n\n locator = mpl.category.StrCategoryLocator({})\n\n return locator, None\n\n def _get_formatter(self, locator, formatter):\n\n if formatter is not None:\n return formatter\n\n formatter = mpl.category.StrCategoryFormatter({})\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 383, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator):\n if locator is not None:\n return locator\n return FixedLocator([0, 1]), None\n\n def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 388, "name": "StrCategoryLocator", "kind": "ref", "category": "function", "info": " locator = mpl.category.StrCategoryLocator({})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 392, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter):\n if formatter is not None:\n return formatter\n return FuncFormatter(lambda x, _: str(bool(x)))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 397, "name": "StrCategoryFormatter", "kind": "ref", "category": "function", "info": " formatter = mpl.category.StrCategoryFormatter({})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 403, "name": "Ordinal", "kind": "def", 
"category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 409, "name": "Discrete", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 415, "name": "ContinuousBase", "kind": "def", "category": "class", "info": "_setup\t_get_transform"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 420, "name": "_setup", "kind": "def", "category": "function", "info": " def _setup(\n self, data: Series, prop: Property, axis: Axis | None = None,\n ) -> Scale:\n raise NotImplementedError()\n\n def _finalize(self, p: Plot, axis: Axis) -> None:\n \"\"\"Perform scale-specific axis tweaks after adding artists.\"\"\"\n pass\n\n def __call__(self, data: Series) -> ArrayLike:\n\n trans_data: Series | NDArray | list\n\n # TODO sometimes we need to handle scalars (e.g. for Line)\n # but what is the best way to do that?\n scalar_data = np.isscalar(data)\n if scalar_data:\n trans_data = np.array([data])\n else:\n trans_data = data\n\n for func in self._pipeline:\n if func is not None:\n trans_data = func(trans_data)\n\n if scalar_data:\n return trans_data[0]\n else:\n return trans_data\n\n @staticmethod\n def _identity():\n\n class Identity(Scale):\n _pipeline = []\n _spacer = None\n _legend = None\n _matplotlib_scale = None\n\n return Identity()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 426, "name": "tick", "kind": "ref", "category": "function", "info": " new = new.tick()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 428, "name": "label", "kind": "ref", "category": "function", "info": " new = new.label()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 430, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = new._get_transform()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 432, "name": "_get_scale", "kind": "ref", "category": "function", "info": " mpl_scale = new._get_scale(str(data.name), forward, inverse)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 435, "name": "PseudoAxis", "kind": "ref", "category": "function", "info": " axis = PseudoAxis(mpl_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 448, "name": "forward", "kind": "ref", "category": "function", "info": " a = forward(vmin)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 449, "name": "forward", "kind": "ref", "category": "function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 449, "name": "forward", "kind": "ref", "category": 
"function", "info": " b = forward(vmax) - forward(vmin)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 451, "name": "normalize", "kind": "def", "category": "function", "info": " def normalize(x):\n return (x - a) / b\n\n else:\n normalize = vmin = vmax = None\n\n new._pipeline = [\n axis.convert_units,\n forward,\n normalize,\n prop.get_mapping(new, data)\n ]\n\n def spacer(x):\n x = x.dropna().unique()\n if len(x) < 2:\n return np.nan\n return np.min(np.diff(np.sort(x)))\n new._spacer = spacer\n\n # TODO How to allow disabling of legend for all uses of property?\n # Could add a Scale parameter, or perhaps Scale.suppress()?\n # Are there other useful parameters that would be in Scale.legend()\n # besides allowing Scale.legend(False)?\n if prop.legend:\n axis.set_view_interval(vmin, vmax)\n locs = axis.major.locator()\n locs = locs[(vmin <= locs) & (locs <= vmax)]\n # Avoid having an offset / scientific notation in a legend\n # as we don't represent that anywhere so it ends up incorrect.\n # This could become an option (e.g. Continuous.label(offset=True))\n # in which case we would need to figure out how to show it.\n if hasattr(axis.major.formatter, \"set_useOffset\"):\n axis.major.formatter.set_useOffset(False)\n if hasattr(axis.major.formatter, \"set_scientific\"):\n axis.major.formatter.set_scientific(False)\n labels = axis.major.formatter.format_ticks(locs)\n new._legend = list(locs), list(labels)\n\n return new\n\n def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 461, "name": "get_mapping", "kind": "ref", "category": "function", "info": " prop.get_mapping(new, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 464, "name": "spacer", "kind": "def", "category": "function", "info": " def spacer(x):\n x = x.dropna().unique()\n if len(x) < 2:\n return np.nan\n return np.min(np.diff(np.sort(x)))\n new._spacer = spacer\n\n # TODO How to allow disabling of legend for all uses of property?\n # Could add a Scale parameter, or perhaps Scale.suppress()?\n # Are there other useful parameters that would be in Scale.legend()\n # besides allowing Scale.legend(False)?\n if prop.legend:\n axis.set_view_interval(vmin, vmax)\n locs = axis.major.locator()\n locs = locs[(vmin <= locs) & (locs <= vmax)]\n # Avoid having an offset / scientific notation in a legend\n # as we don't represent that anywhere so it ends up incorrect.\n # This could become an option 
(e.g. Continuous.label(offset=True))\n # in which case we would need to figure out how to show it.\n if hasattr(axis.major.formatter, \"set_useOffset\"):\n axis.major.formatter.set_useOffset(False)\n if hasattr(axis.major.formatter, \"set_scientific\"):\n axis.major.formatter.set_scientific(False)\n labels = axis.major.formatter.format_ticks(locs)\n new._legend = list(locs), list(labels)\n\n return new\n\n def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 477, "name": "locator", "kind": "ref", "category": "function", "info": " locs = axis.major.locator()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 492, "name": "_get_transform", "kind": "def", "category": "function", "info": " def _get_transform(self):\n\n arg = self.trans\n\n def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 496, "name": "get_param", "kind": "def", "category": "function", "info": " def get_param(method, default):\n if arg == method:\n return default\n return float(arg[len(method):])\n\n if arg is None:\n return _make_identity_transforms()\n elif isinstance(arg, tuple):\n return arg\n elif isinstance(arg, str):\n if arg == \"ln\":\n return _make_log_transforms()\n elif arg == \"logit\":\n base = get_param(\"logit\", 10)\n return _make_logit_transforms(base)\n elif arg.startswith(\"log\"):\n base = get_param(\"log\", 10)\n return _make_log_transforms(base)\n elif arg.startswith(\"symlog\"):\n c = get_param(\"symlog\", 1)\n return _make_symlog_transforms(c)\n elif arg.startswith(\"pow\"):\n exp = get_param(\"pow\", 2)\n return _make_power_transforms(exp)\n elif arg == \"sqrt\":\n 
return _make_sqrt_transforms()\n else:\n raise ValueError(f\"Unknown value provided for trans: {arg!r}\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 502, "name": "_make_identity_transforms", "kind": "ref", "category": "function", "info": " return _make_identity_transforms()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 507, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 509, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"logit\", 10)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 510, "name": "_make_logit_transforms", "kind": "ref", "category": "function", "info": " return _make_logit_transforms(base)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 512, "name": "get_param", "kind": "ref", "category": "function", "info": " base = get_param(\"log\", 10)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 513, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " return _make_log_transforms(base)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 515, "name": "get_param", "kind": "ref", "category": "function", "info": " c = get_param(\"symlog\", 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 516, "name": "_make_symlog_transforms", "kind": "ref", "category": "function", "info": " return _make_symlog_transforms(c)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 518, "name": "get_param", "kind": "ref", "category": "function", "info": " exp = get_param(\"pow\", 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 519, "name": "_make_power_transforms", "kind": "ref", "category": "function", "info": " return _make_power_transforms(exp)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 521, "name": "_make_sqrt_transforms", "kind": "ref", "category": "function", "info": " return _make_sqrt_transforms()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 527, "name": "Continuous", "kind": "def", "category": "class", "info": "tick\tlabel\t_parse_for_log_params\t_get_locators\t_get_formatter"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 539, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: 
Sequence[float] | None = None,\n upto: int | None = None,\n count: int | None = None,\n every: float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n # Input checks\n if locator is not None and not isinstance(locator, Locator):\n raise TypeError(\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError(\"`count` requires `between` with log transform.\")\n if every is not None:\n raise RuntimeError(\"`every` not supported with log transform.\")\n\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n \"at\": at,\n \"upto\": upto,\n \"count\": count,\n \"every\": every,\n \"between\": between,\n \"minor\": minor,\n }\n return new\n\n def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable with a signature like\n `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the\n tick value and `pos` is passed as the tick index.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). 
When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": 
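The Continuous.label records above route a `like` string through StrMethodFormatter (wrapping bare patterns such as ".2f" into "{x:.2f}") and a `unit` value through EngFormatter for SI prefixes. A minimal sketch of just those two branches; the helper name choose_formatter is hypothetical, only the matplotlib classes are real, and the empty-unit separator case from the dump is omitted:

from matplotlib.ticker import (
    EngFormatter, FuncFormatter, ScalarFormatter, StrMethodFormatter,
)

def choose_formatter(like=None, unit=None):
    if like is not None:
        if isinstance(like, str):
            # Bare patterns like ".2f" become "{x:.2f}"; field syntax passes through
            fmt = like if ("{x" in like or "{pos" in like) else f"{{x:{like}}}"
            return StrMethodFormatter(fmt)
        return FuncFormatter(like)  # callable with signature f(x, pos) -> str
    if unit is not None:
        # A (sep, unit) tuple overrides the default single-space separator
        sep, unit = unit if isinstance(unit, tuple) else (" ", unit)
        return EngFormatter(unit, sep=sep)
    return ScalarFormatter()  # the dump falls back to ScalarFormatter too

print(choose_formatter(like="${x:.2f}")(2.5, 0))  # -> $2.50
print(choose_formatter(unit="g")(5000, 0))        # -> 5 kg, matching the docstring example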
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 581, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 600, "name": "label", "kind": "def", "category": "function", "info": " def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable with a signature like\n `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the\n tick value and `pos` is passed as the tick index.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + 
every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 652, "name": "_parse_for_log_params", "kind": "def", "category": "function", "info": " def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n 
minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 658, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"^log(\\d*)\", trans)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 661, "name": "match", "kind": "ref", "category": "function", "info": " m = re.match(r\"symlog(\\d*)\", trans)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 666, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in 
like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 668, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 685, "name": "_get_transform", "kind": "ref", "category": "function", "info": " forward, inverse = self._get_transform()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 686, "name": "forward", "kind": "ref", "category": "function", "info": " lo, hi = forward(between)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 687, "name": "inverse", "kind": "ref", "category": "function", "info": " ticks = inverse(np.linspace(lo, hi, num=count))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 722, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 724, "name": "_parse_for_log_params", "kind": "ref", "category": "function", "info": " log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 763, "name": "Temporal", "kind": "def", "category": "class", "info": "tick\tlabel\t_get_locators\t_get_formatter"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 781, "name": "tick", "kind": "def", "category": "function", "info": " def tick(\n self,\n locator: Locator | None = None, *,\n at: Sequence[float] | None = None,\n upto: int | None = None,\n count: int | None = None,\n every: 
float | None = None,\n between: tuple[float, float] | None = None,\n minor: int | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the selection of ticks for the scale's axis or legend.\n\n Parameters\n ----------\n locator : :class:`matplotlib.ticker.Locator` subclass\n Pre-configured matplotlib locator; other parameters will not be used.\n at : sequence of floats\n Place ticks at these specific locations (in data units).\n upto : int\n Choose \"nice\" locations for ticks, but do not exceed this number.\n count : int\n Choose exactly this number of ticks, bounded by `between` or axis limits.\n every : float\n Choose locations at this interval of separation (in data units).\n between : pair of floats\n Bound upper / lower ticks when using `every` or `count`.\n minor : int\n Number of unlabeled ticks to draw between labeled \"major\" ticks.\n\n Returns\n -------\n scale\n Copy of self with new tick configuration.\n\n \"\"\"\n # Input checks\n if locator is not None and not isinstance(locator, Locator):\n raise TypeError(\n f\"Tick locator must be an instance of {Locator!r}, \"\n f\"not {type(locator)!r}.\"\n )\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if log_base or symlog_thresh:\n if count is not None and between is None:\n raise RuntimeError(\"`count` requires `between` with log transform.\")\n if every is not None:\n raise RuntimeError(\"`every` not supported with log transform.\")\n\n new = copy(self)\n new._tick_params = {\n \"locator\": locator,\n \"at\": at,\n \"upto\": upto,\n \"count\": count,\n \"every\": every,\n \"between\": between,\n \"minor\": minor,\n }\n return new\n\n def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable with a signature like\n `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the\n tick value and `pos` is passed as the tick index.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). 
When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": 
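The tick() records above resolve the at/upto/count/every/between parameters into concrete matplotlib locators in a fixed priority order. A condensed sketch of that branch order under linear scaling; pick_major_locator is an illustrative name, and the log/symlog branches from the dump are omitted for brevity:

import numpy as np
from matplotlib.ticker import (
    AutoLocator, FixedLocator, LinearLocator, MaxNLocator, MultipleLocator,
)

def pick_major_locator(locator=None, at=None, upto=None, count=None,
                       every=None, between=None):
    if locator is not None:
        return locator                                   # explicit locator wins
    if upto is not None:
        return MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])
    if count is not None:
        if between is None:
            return LinearLocator(count)                  # rarely useful without limits
        return FixedLocator(np.linspace(*between, num=count))
    if every is not None:
        if between is None:
            return MultipleLocator(every)
        lo, hi = between
        return FixedLocator(np.arange(lo, hi + every, every))
    if at is not None:
        return FixedLocator(at)
    return AutoLocator()                                 # default "nice" ticks

print(pick_major_locator(count=5, between=(0, 1)).tick_values(0, 1))
# -> [0.   0.25 0.5  0.75 1.  ]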
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 815, "name": "label", "kind": "def", "category": "function", "info": " def label(\n self,\n formatter: Formatter | None = None, *,\n like: str | Callable | None = None,\n base: int | None | Default = default,\n unit: str | None = None,\n ) -> Continuous:\n \"\"\"\n Configure the appearance of tick labels for the scale's axis or legend.\n\n Parameters\n ----------\n formatter : :class:`matplotlib.ticker.Formatter` subclass\n Pre-configured formatter to use; other parameters will be ignored.\n like : str or callable\n Either a format pattern (e.g., `\".2f\"`), a format string with fields named\n `x` and/or `pos` (e.g., `\"${x:.2f}\"`), or a callable with a signature like\n `f(x: float, pos: int) -> str`. In the latter variants, `x` is passed as the\n tick value and `pos` is passed as the tick index.\n base : number\n Use log formatter (with scientific notation) having this value as the base.\n Set to `None` to override the default formatter with a log transform.\n unit : str or (str, str) tuple\n Use SI prefixes with these units (e.g., with `unit=\"g\"`, a tick value\n of 5000 will appear as `5 kg`). When a tuple, the first element gives the\n separator between the number and unit.\n\n Returns\n -------\n scale\n Copy of self with new label configuration.\n\n \"\"\"\n # Input checks\n if formatter is not None and not isinstance(formatter, Formatter):\n raise TypeError(\n f\"Label formatter must be an instance of {Formatter!r}, \"\n f\"not {type(formatter)!r}\"\n )\n if like is not None and not (isinstance(like, str) or callable(like)):\n msg = f\"`like` must be a string or callable, not {type(like).__name__}.\"\n raise TypeError(msg)\n\n new = copy(self)\n new._label_params = {\n \"formatter\": formatter,\n \"like\": like,\n \"base\": base,\n \"unit\": unit,\n }\n return new\n\n def _parse_for_log_params(\n self, trans: str | TransFuncs | None\n ) -> tuple[float | None, float | None]:\n\n log_base = symlog_thresh = None\n if isinstance(trans, str):\n m = re.match(r\"^log(\\d*)\", trans)\n if m is not None:\n log_base = float(m[1] or 10)\n m = re.match(r\"symlog(\\d*)\", trans)\n if m is not None:\n symlog_thresh = float(m[1] or 1)\n return log_base, symlog_thresh\n\n def _get_locators(self, locator, at, upto, count, every, between, minor):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n\n if locator is not None:\n major_locator = locator\n\n elif upto is not None:\n if log_base:\n major_locator = LogLocator(base=log_base, numticks=upto)\n else:\n major_locator = MaxNLocator(upto, steps=[1, 1.5, 2, 2.5, 3, 5, 10])\n\n elif count is not None:\n if between is None:\n # This is rarely useful (unless you are setting limits)\n major_locator = LinearLocator(count)\n else:\n if log_base or symlog_thresh:\n forward, inverse = self._get_transform()\n lo, hi = forward(between)\n ticks = inverse(np.linspace(lo, hi, num=count))\n else:\n ticks = np.linspace(*between, num=count)\n major_locator = FixedLocator(ticks)\n\n elif every is not None:\n if between is None:\n major_locator = MultipleLocator(every)\n else:\n lo, hi = between\n ticks = np.arange(lo, hi + every, every)\n major_locator = FixedLocator(ticks)\n\n elif at is not None:\n major_locator = FixedLocator(at)\n\n else:\n if log_base:\n major_locator = LogLocator(log_base)\n elif symlog_thresh:\n major_locator = SymmetricalLogLocator(linthresh=symlog_thresh, base=10)\n else:\n major_locator = 
AutoLocator()\n\n if minor is None:\n minor_locator = LogLocator(log_base, subs=None) if log_base else None\n else:\n if log_base:\n subs = np.linspace(0, log_base, minor + 2)[1:-1]\n minor_locator = LogLocator(log_base, subs=subs)\n else:\n minor_locator = AutoMinorLocator(minor + 1)\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, like, base, unit):\n\n log_base, symlog_thresh = self._parse_for_log_params(self.trans)\n if base is default:\n if symlog_thresh:\n log_base = 10\n base = log_base\n\n if formatter is not None:\n return formatter\n\n if like is not None:\n if isinstance(like, str):\n if \"{x\" in like or \"{pos\" in like:\n fmt = like\n else:\n fmt = f\"{{x:{like}}}\"\n formatter = StrMethodFormatter(fmt)\n else:\n formatter = FuncFormatter(like)\n\n elif base is not None:\n # We could add other log options if necessary\n formatter = LogFormatterSciNotation(base)\n\n elif unit is not None:\n if isinstance(unit, tuple):\n sep, unit = unit\n elif not unit:\n sep = \"\"\n else:\n sep = \" \"\n formatter = EngFormatter(unit, sep=sep)\n\n else:\n formatter = ScalarFormatter()\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 844, "name": "_get_locators", "kind": "def", "category": "function", "info": " def _get_locators(self, locator, upto):\n\n if locator is not None:\n major_locator = locator\n elif upto is not None:\n major_locator = AutoDateLocator(minticks=2, maxticks=upto)\n\n else:\n major_locator = AutoDateLocator(minticks=2, maxticks=6)\n minor_locator = None\n\n return major_locator, minor_locator\n\n def _get_formatter(self, locator, formatter, concise):\n\n if formatter is not None:\n return formatter\n\n if concise:\n # TODO ideally we would have concise coordinate ticks,\n # but full semantic ticks. Is that possible?\n formatter = ConciseDateFormatter(locator)\n else:\n formatter = AutoDateFormatter(locator)\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 857, "name": "_get_formatter", "kind": "def", "category": "function", "info": " def _get_formatter(self, locator, formatter, concise):\n\n if formatter is not None:\n return formatter\n\n if concise:\n # TODO ideally we would have concise coordinate ticks,\n # but full semantic ticks. 
Is that possible?\n formatter = ConciseDateFormatter(locator)\n else:\n formatter = AutoDateFormatter(locator)\n\n return formatter\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 890, "name": "PseudoAxis", "kind": "def", "category": "class", "info": "__init__\tset_view_interval\tget_view_interval\tset_data_interval\tget_data_interval\tget_tick_space\tset_major_locator\tset_major_formatter\tset_minor_locator\tset_minor_formatter\tset_units\tupdate_units\tconvert_units\tget_scale\tget_majorticklocs"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 907, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.major = mpl.axis.Ticker()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 908, "name": "Ticker", "kind": "ref", "category": "function", "info": " self.minor = mpl.axis.Ticker()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 960, "name": "get_converter", "kind": "ref", "category": "function", "info": " self.converter = mpl.units.registry.get_converter(x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 962, "name": "default_units", "kind": "ref", "category": "function", "info": " self.converter.default_units(x, self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 964, "name": "axisinfo", "kind": "ref", "category": "function", "info": " info = self.converter.axisinfo(self.units, self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 982, "name": "convert", "kind": "ref", "category": "function", "info": " return self.converter.convert(x, self.units, self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 993, "name": "locator", "kind": "ref", "category": "function", "info": " return self.major.locator()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1000, "name": "_make_identity_transforms", "kind": "def", "category": "function", "info": "def _make_identity_transforms() -> TransFuncs:\n\n def identity(x):\n return x\n\n return identity, identity\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1008, "name": "_make_logit_transforms", "kind": "def", "category": "function", "info": "def _make_logit_transforms(base: float | None = None) -> TransFuncs:\n\n log, exp = _make_log_transforms(base)\n\n def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1010, "name": "_make_log_transforms", 
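The _make_log_transforms and _make_logit_transforms definitions above wrap numpy ufuncs so that the forward and inverse functions form an exact round trip while silencing domain warnings. A small self-contained restatement of that pattern with a sanity check; this is a sketch of the same idea, not the module itself:

from functools import partial
import numpy as np

def make_log_transforms(base=None):
    # Fast special cases for e, 2, and 10; generic change-of-base otherwise
    if base is None:
        fs = np.log, np.exp
    elif base == 2:
        fs = np.log2, partial(np.power, 2)
    elif base == 10:
        fs = np.log10, partial(np.power, 10)
    else:
        fs = (lambda x: np.log(x) / np.log(base)), partial(np.power, base)

    def log(x):
        with np.errstate(invalid="ignore", divide="ignore"):
            return fs[0](x)

    def exp(x):
        with np.errstate(invalid="ignore", divide="ignore"):
            return fs[1](x)

    return log, exp

fwd, inv = make_log_transforms(10)
x = np.array([0.1, 1.0, 10.0, 100.0])
assert np.allclose(inv(fwd(x)), x)  # round trip recovers the data
print(fwd(x))                       # -> [-1.  0.  1.  2.]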
"kind": "ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1012, "name": "logit", "kind": "def", "category": "function", "info": " def logit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return log(x) - log(1 - x)\n\n def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1016, "name": "expit", "kind": "def", "category": "function", "info": " def expit(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return exp(x) / (1 + exp(x))\n\n return logit, expit\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1023, "name": "_make_log_transforms", "kind": "def", "category": "function", "info": "def _make_log_transforms(base: float | None = None) -> TransFuncs:\n\n fs: TransFuncs\n if base is None:\n fs = np.log, np.exp\n elif base == 2:\n fs = np.log2, partial(np.power, 2)\n elif base == 10:\n fs = np.log10, partial(np.power, 10)\n else:\n def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1033, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1048, "name": "_make_symlog_transforms", "kind": "def", "category": "function", "info": "def _make_symlog_transforms(c: float = 1, base: float = 10) -> TransFuncs:\n\n # From https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001\n\n # Note: currently not using base because we only get\n # one parameter from the string, and are using c (this is consistent with d3)\n\n log, exp = _make_log_transforms(base)\n\n def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1055, "name": "_make_log_transforms", "kind": "ref", "category": "function", "info": " log, exp = _make_log_transforms(base)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", 
"line": 1057, "name": "symlog", "kind": "def", "category": "function", "info": " def symlog(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * log(1 + np.abs(np.divide(x, c)))\n\n def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1061, "name": "symexp", "kind": "def", "category": "function", "info": " def symexp(x):\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return np.sign(x) * c * (exp(np.abs(x)) - 1)\n\n return symlog, symexp\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1068, "name": "_make_sqrt_transforms", "kind": "def", "category": "function", "info": "def _make_sqrt_transforms() -> TransFuncs:\n\n def sqrt(x):\n return np.sign(x) * np.sqrt(np.abs(x))\n\n def square(x):\n return np.sign(x) * np.square(x)\n\n return sqrt, square\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1079, "name": "_make_power_transforms", "kind": "def", "category": "function", "info": "def _make_power_transforms(exp: float) -> TransFuncs:\n\n def forward(x):\n return np.sign(x) * np.power(np.abs(x), exp)\n\n def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1081, "name": "forward", "kind": "def", "category": "function", "info": " def forward(x):\n return np.log(x) / np.log(base)\n fs = forward, partial(np.power, base)\n\n def log(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[0](x)\n\n def exp(x: ArrayLike) -> ArrayLike:\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n return fs[1](x)\n\n return log, exp\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1084, "name": "inverse", "kind": "def", "category": "function", "info": " def inverse(x):\n return np.sign(x) * np.power(np.abs(x), 1 / exp)\n\n return forward, inverse\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/scales.py", "rel_fname": "seaborn/_core/scales.py", "line": 1090, "name": "_default_spacer", "kind": "def", "category": "function", "info": "def _default_spacer(x: Series) -> float:\n return 1\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 15, "name": "Subplots", "kind": "def", "category": "class", "info": "__init__\t_check_dimension_uniqueness\t_determine_grid_dimensions\t_handle_wrapping\t_determine_axis_sharing\tinit_figure\t__iter__\t__len__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 40, "name": "_check_dimension_uniqueness", "kind": "ref", "category": "function", "info": " self._check_dimension_uniqueness(facet_spec, pair_spec)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": 
"seaborn/_core/subplots.py", "line": 41, "name": "_determine_grid_dimensions", "kind": "ref", "category": "function", "info": " self._determine_grid_dimensions(facet_spec, pair_spec)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 42, "name": "_handle_wrapping", "kind": "ref", "category": "function", "info": " self._handle_wrapping(facet_spec, pair_spec)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 43, "name": "_determine_axis_sharing", "kind": "ref", "category": "function", "info": " self._determine_axis_sharing(pair_spec)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 45, "name": "_check_dimension_uniqueness", "kind": "def", "category": "function", "info": " def _check_dimension_uniqueness(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Reject specs that pair and facet on (or wrap to) same figure dimension.\"\"\"\n err = None\n\n facet_vars = facet_spec.get(\"variables\", {})\n\n if facet_spec.get(\"wrap\") and {\"col\", \"row\"} <= set(facet_vars):\n err = \"Cannot wrap facets when specifying both `col` and `row`.\"\n elif (\n pair_spec.get(\"wrap\")\n and pair_spec.get(\"cross\", True)\n and len(pair_spec.get(\"structure\", {}).get(\"x\", [])) > 1\n and len(pair_spec.get(\"structure\", {}).get(\"y\", [])) > 1\n ):\n err = \"Cannot wrap subplots when pairing on both `x` and `y`.\"\n\n collisions = {\"x\": [\"columns\", \"rows\"], \"y\": [\"rows\", \"columns\"]}\n for pair_axis, (multi_dim, wrap_dim) in collisions.items():\n if pair_axis not in pair_spec.get(\"structure\", {}):\n continue\n elif multi_dim[:3] in facet_vars:\n err = f\"Cannot facet the {multi_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and facet_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {wrap_dim} while pairing on `{pair_axis}``.\"\n elif wrap_dim[:3] in facet_vars and pair_spec.get(\"wrap\"):\n err = f\"Cannot wrap the {multi_dim} while faceting the {wrap_dim}.\"\n\n if err is not None:\n raise RuntimeError(err) # TODO what err class? 
Define PlotSpecError?\n\n def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n 
else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 77, "name": "_determine_grid_dimensions", "kind": "def", "category": "function", "info": " def _determine_grid_dimensions(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Parse faceting and pairing information to define figure structure.\"\"\"\n self.grid_dimensions: dict[str, list] = {}\n for dim, axis in zip([\"col\", \"row\"], [\"x\", \"y\"]):\n\n facet_vars = facet_spec.get(\"variables\", {})\n if dim in facet_vars:\n self.grid_dimensions[dim] = facet_spec[\"structure\"][dim]\n elif axis in pair_spec.get(\"structure\", {}):\n self.grid_dimensions[dim] = [\n None for _ in pair_spec.get(\"structure\", {})[axis]\n ]\n else:\n self.grid_dimensions[dim] = [None]\n\n self.subplot_spec[f\"n{dim}s\"] = len(self.grid_dimensions[dim])\n\n if not pair_spec.get(\"cross\", True):\n self.subplot_spec[\"nrows\"] = 1\n\n self.n_subplots = self.subplot_spec[\"ncols\"] * self.subplot_spec[\"nrows\"]\n\n def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) 
-> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = 
{\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 101, "name": "_handle_wrapping", "kind": "def", "category": "function", "info": " def _handle_wrapping(\n self, facet_spec: FacetSpec, pair_spec: PairSpec\n ) -> None:\n \"\"\"Update figure structure parameters based on facet/pair wrapping.\"\"\"\n self.wrap = wrap = facet_spec.get(\"wrap\") or pair_spec.get(\"wrap\")\n if not wrap:\n return\n\n wrap_dim = \"row\" if self.subplot_spec[\"nrows\"] > 1 else \"col\"\n flow_dim = {\"row\": \"col\", \"col\": \"row\"}[wrap_dim]\n n_subplots = self.subplot_spec[f\"n{wrap_dim}s\"]\n flow = int(np.ceil(n_subplots / wrap))\n\n if wrap < self.subplot_spec[f\"n{wrap_dim}s\"]:\n self.subplot_spec[f\"n{wrap_dim}s\"] = wrap\n self.subplot_spec[f\"n{flow_dim}s\"] = flow\n self.n_subplots = n_subplots\n self.wrap_dim = wrap_dim\n\n def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], 
self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 120, "name": "_determine_axis_sharing", "kind": "def", "category": "function", "info": " def _determine_axis_sharing(self, pair_spec: PairSpec) -> None:\n \"\"\"Update 
subplot spec with default or specified axis sharing parameters.\"\"\"\n axis_to_dim = {\"x\": \"col\", \"y\": \"row\"}\n key: str\n val: str | bool\n for axis in \"xy\":\n key = f\"share{axis}\"\n # Always use user-specified value, if present\n if key not in self.subplot_spec:\n if axis in pair_spec.get(\"structure\", {}):\n # Paired axes are shared along one dimension by default\n if self.wrap is None and pair_spec.get(\"cross\", True):\n val = axis_to_dim[axis]\n else:\n val = False\n else:\n # This will pick up faceted plots, as well as single subplot\n # figures, where the value doesn't really matter\n val = True\n self.subplot_spec[key] = val\n\n def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) == self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if 
not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/subplots.py", "rel_fname": "seaborn/_core/subplots.py", "line": 141, "name": "init_figure", "kind": "def", "category": "function", "info": " def init_figure(\n self,\n pair_spec: PairSpec,\n pyplot: bool = False,\n figure_kws: dict | None = None,\n target: Axes | Figure | SubFigure = None,\n ) -> Figure:\n \"\"\"Initialize matplotlib objects and add seaborn-relevant metadata.\"\"\"\n # TODO reduce need to pass pair_spec here?\n\n if figure_kws is None:\n figure_kws = {}\n\n if isinstance(target, mpl.axes.Axes):\n\n if max(self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]) > 1:\n err = \" \".join([\n \"Cannot create multiple subplots after calling `Plot.on` with\",\n f\"a {mpl.axes.Axes} object.\",\n ])\n try:\n err += f\" You may want to use a {mpl.figure.SubFigure} instead.\"\n except AttributeError: # SubFigure added in mpl 3.4\n pass\n raise RuntimeError(err)\n\n self._subplot_list = [{\n \"ax\": target,\n \"left\": True,\n \"right\": True,\n \"top\": True,\n \"bottom\": True,\n \"col\": None,\n \"row\": None,\n \"x\": \"x\",\n \"y\": \"y\",\n }]\n self._figure = target.figure\n return self._figure\n\n elif (\n hasattr(mpl.figure, \"SubFigure\") # Added in mpl 3.4\n and isinstance(target, mpl.figure.SubFigure)\n ):\n figure = target.figure\n elif isinstance(target, mpl.figure.Figure):\n figure = target\n else:\n if pyplot:\n figure = plt.figure(**figure_kws)\n else:\n figure = mpl.figure.Figure(**figure_kws)\n target = figure\n self._figure = figure\n\n axs = target.subplots(**self.subplot_spec, squeeze=False)\n\n if self.wrap:\n # Remove unused Axes and flatten the rest into a (2D) vector\n axs_flat = axs.ravel({\"col\": \"C\", \"row\": \"F\"}[self.wrap_dim])\n axs, extra = np.split(axs_flat, [self.n_subplots])\n for ax in extra:\n ax.remove()\n if self.wrap_dim == \"col\":\n axs = axs[np.newaxis, :]\n else:\n axs = axs[:, np.newaxis]\n\n # Get i, j coordinates for each Axes object\n # Note that i, j are with respect to faceting/pairing,\n # not the subplot grid itself, (which only matters in the case of wrapping).\n iter_axs: np.ndenumerate | zip\n if not pair_spec.get(\"cross\", True):\n indices = np.arange(self.n_subplots)\n iter_axs = zip(zip(indices, indices), axs.flat)\n else:\n iter_axs = np.ndenumerate(axs)\n\n self._subplot_list = []\n for (i, j), ax in iter_axs:\n\n info = {\"ax\": ax}\n\n nrows, ncols = self.subplot_spec[\"nrows\"], self.subplot_spec[\"ncols\"]\n if not self.wrap:\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = (j + 1) % ncols == 0\n info[\"top\"] = i == 0\n info[\"bottom\"] = i == nrows - 1\n elif self.wrap_dim == \"col\":\n info[\"left\"] = j % ncols == 0\n info[\"right\"] = ((j + 1) % ncols == 0) or ((j + 1) 
== self.n_subplots)\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= (self.n_subplots - ncols)\n elif self.wrap_dim == \"row\":\n info[\"left\"] = i < nrows\n info[\"right\"] = i >= self.n_subplots - nrows\n info[\"top\"] = i % nrows == 0\n info[\"bottom\"] = ((i + 1) % nrows == 0) or ((i + 1) == self.n_subplots)\n\n if not pair_spec.get(\"cross\", True):\n info[\"top\"] = j < ncols\n info[\"bottom\"] = j >= self.n_subplots - ncols\n\n for dim in [\"row\", \"col\"]:\n idx = {\"row\": i, \"col\": j}[dim]\n info[dim] = self.grid_dimensions[dim][idx]\n\n for axis in \"xy\":\n\n idx = {\"x\": j, \"y\": i}[axis]\n if axis in pair_spec.get(\"structure\", {}):\n key = f\"{axis}{idx}\"\n else:\n key = axis\n info[axis] = key\n\n self._subplot_list.append(info)\n\n return figure\n\n def __iter__(self) -> Generator[dict, None, None]: # TODO TypedDict?\n \"\"\"Yield each subplot dictionary with Axes object and metadata.\"\"\"\n yield from self._subplot_list\n\n def __len__(self) -> int:\n \"\"\"Return the number of subplots in this figure.\"\"\"\n return len(self._subplot_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/typing.py", "rel_fname": "seaborn/_core/typing.py", "line": 33, "name": "Default", "kind": "def", "category": "class", "info": "__repr__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/typing.py", "rel_fname": "seaborn/_core/typing.py", "line": 38, "name": "Deprecated", "kind": "def", "category": "class", "info": "__repr__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/typing.py", "rel_fname": "seaborn/_core/typing.py", "line": 43, "name": "Default", "kind": "ref", "category": "function", "info": "default = Default()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_core/typing.py", "rel_fname": "seaborn/_core/typing.py", "line": 44, "name": "Deprecated", "kind": "ref", "category": "function", "info": "deprecated = Deprecated()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_decorators.py", "rel_fname": "seaborn/_decorators.py", "line": 3, "name": "share_init_params_with_map", "kind": "def", "category": "function", "info": "def share_init_params_with_map(cls):\n \"\"\"Make cls.map a classmethod with same signature as cls.__init__.\"\"\"\n map_sig = signature(cls.map)\n init_sig = signature(cls.__init__)\n\n new = [v for k, v in init_sig.parameters.items() if k != \"self\"]\n new.insert(0, map_sig.parameters[\"cls\"])\n cls.map.__signature__ = map_sig.replace(parameters=new)\n cls.map.__doc__ = cls.__init__.__doc__\n\n cls.map = classmethod(cls.map)\n\n return cls\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 5, "name": "DocstringComponents", "kind": "def", "category": "class", "info": "__init__\t__getattr__\tfrom_nested_components\tfrom_function_params"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 18, "name": "group", "kind": "ref", "category": "function", "info": " entries[key] = m.group(1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 24, "name": "__getattr__", "kind": "def", "category": "function", "info": " def __getattr__(self, attr):\n \"\"\"Provide dot access to entries for clean raw docstrings.\"\"\"\n if 
attr in self.entries:\n return self.entries[attr]\n else:\n try:\n return self.__getattribute__(attr)\n except AttributeError as err:\n # If Python is run with -OO, it will strip docstrings and our lookup\n # from self.entries will fail. We check for __debug__, which is actually\n # set to False by -O (it is True for normal execution).\n # But we only want to see an error when building the docs;\n # not something users should see, so this slight inconsistency is fine.\n if __debug__:\n raise err\n else:\n pass\n\n @classmethod\n def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 43, "name": "from_nested_components", "kind": "def", "category": "function", "info": " def from_nested_components(cls, **kwargs):\n \"\"\"Add multiple sub-sets of components.\"\"\"\n return cls(kwargs, strip_whitespace=False)\n\n @classmethod\n def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 45, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(kwargs, strip_whitespace=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 48, "name": "from_function_params", "kind": "def", "category": "function", "info": " def from_function_params(cls, func):\n \"\"\"Use the numpydoc parser to extract components from existing func.\"\"\"\n params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n comp_dict = {}\n for p in params:\n name = p.name\n type = p.type\n desc = \"\\n \".join(p.desc)\n comp_dict[name] = f\"{name} : {type}\\n {desc}\"\n\n return cls(comp_dict)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 50, "name": "NumpyDocString", "kind": "ref", "category": "function", "info": " params = NumpyDocString(pydoc.getdoc(func))[\"Parameters\"]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 58, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(comp_dict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 194, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " params=DocstringComponents(_core_params),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": 
"seaborn/_docstrings.py", "line": 195, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " returns=DocstringComponents(_core_returns),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_docstrings.py", "rel_fname": "seaborn/_docstrings.py", "line": 196, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " seealso=DocstringComponents(_seealso_blurbs),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 20, "name": "AreaBase", "kind": "def", "category": "class", "info": "_plot\t_standardize_coordinate_parameters\t_postprocess_artist\t_get_verts\t_legend_artist"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 22, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n patches = defaultdict(list)\n\n for keys, data, ax in split_gen():\n\n kws = {}\n data = self._standardize_coordinate_parameters(data, orient)\n resolved = resolve_properties(self, keys, scales)\n verts = self._get_verts(data, orient)\n ax.update_datalim(verts)\n\n # TODO should really move this logic into resolve_color\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n kws[\"facecolor\"] = fc\n kws[\"edgecolor\"] = resolve_color(self, keys, \"edge\", scales)\n kws[\"linewidth\"] = resolved[\"edgewidth\"]\n kws[\"linestyle\"] = resolved[\"edgestyle\"]\n\n patches[ax].append(mpl.patches.Polygon(verts, **kws))\n\n for ax, ax_patches in patches.items():\n\n for patch in ax_patches:\n self._postprocess_artist(patch, ax, orient)\n ax.add_patch(patch)\n\n def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 26, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 29, "name": "_standardize_coordinate_parameters", "kind": "ref", "category": "function", "info": " data = self._standardize_coordinate_parameters(data, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 30, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = 
resolve_properties(self, keys, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 31, "name": "_get_verts", "kind": "ref", "category": "function", "info": " verts = self._get_verts(data, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 32, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(verts)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 35, "name": "resolve_color", "kind": "ref", "category": "function", "info": " fc = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 37, "name": "to_rgba", "kind": "ref", "category": "function", "info": " fc = mpl.colors.to_rgba(fc, 0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 40, "name": "resolve_color", "kind": "ref", "category": "function", "info": " kws[\"edgecolor\"] = resolve_color(self, keys, \"edge\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 44, "name": "Polygon", "kind": "ref", "category": "function", "info": " patches[ax].append(mpl.patches.Polygon(verts, **kws))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 49, "name": "_postprocess_artist", "kind": "ref", "category": "function", "info": " self._postprocess_artist(patch, ax, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 50, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(patch)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 52, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 55, "name": "_postprocess_artist", "kind": "def", "category": "function", "info": " def _postprocess_artist(self, 
artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 58, "name": "_get_verts", "kind": "def", "category": "function", "info": " def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 61, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient, kind=\"mergesort\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 63, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}min\"]].to_numpy(),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 64, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 70, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 73, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = 
resolve_properties(self, keys, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 75, "name": "resolve_color", "kind": "ref", "category": "function", "info": " fc = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 77, "name": "to_rgba", "kind": "ref", "category": "function", "info": " fc = mpl.colors.to_rgba(fc, 0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 79, "name": "Patch", "kind": "ref", "category": "function", "info": " return mpl.patches.Patch(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 81, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edgecolor=resolve_color(self, keys, \"edge\", scales),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 90, "name": "Area", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters\t_postprocess_artist"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 103, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 104, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 105, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 106, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 107, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 108, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 109, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 112, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", 
"rel_fname": "seaborn/_marks/area.py", "line": 114, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 116, "name": "rename", "kind": "ref", "category": "function", "info": " return data.rename(columns={\"baseline\": f\"{dv}min\", dv: f\"{dv}max\"})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 118, "name": "_postprocess_artist", "kind": "def", "category": "function", "info": " def _postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 123, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " artist.set_linewidth(artist.get_linewidth() * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 123, "name": "get_linewidth", "kind": "ref", "category": "function", "info": " artist.set_linewidth(artist.get_linewidth() * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 125, "name": "get_linestyle", "kind": "ref", "category": "function", "info": " linestyle = artist.get_linestyle()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 128, "name": "set_linestyle", "kind": "ref", "category": "function", "info": " artist.set_linestyle(linestyle)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "set_clip_path", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "get_path", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 130, "name": "get_transform", "kind": "ref", "category": "function", "info": " artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 132, "name": "set_clip_box", "kind": "ref", "category": "function", "info": " artist.set_clip_box(ax.bbox)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 140, "name": "Band", "kind": "def", "category": "class", "info": "_standardize_coordinate_parameters"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 153, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 154, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.2, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 155, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 156, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 157, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 158, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(0, )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 159, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableFloat = Mappable(\"-\", )\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 161, "name": "_standardize_coordinate_parameters", "kind": "def", "category": "function", "info": " def _standardize_coordinate_parameters(self, data, orient):\n return data\n\n def 
_postprocess_artist(self, artist, ax, orient):\n pass\n\n def _get_verts(self, data, orient):\n\n dv = {\"x\": \"y\", \"y\": \"x\"}[orient]\n data = data.sort_values(orient, kind=\"mergesort\")\n verts = np.concatenate([\n data[[orient, f\"{dv}min\"]].to_numpy(),\n data[[orient, f\"{dv}max\"]].to_numpy()[::-1],\n ])\n if orient == \"y\":\n verts = verts[:, ::-1]\n return verts\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n resolved = resolve_properties(self, keys, scales)\n\n fc = resolve_color(self, keys, \"\", scales)\n if not resolved[\"fill\"]:\n fc = mpl.colors.to_rgba(fc, 0)\n\n return mpl.patches.Patch(\n facecolor=fc,\n edgecolor=resolve_color(self, keys, \"edge\", scales),\n linewidth=resolved[\"edgewidth\"],\n linestyle=resolved[\"edgestyle\"],\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 168, "name": "groupby", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 168, "name": "agg", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/area.py", "rel_fname": "seaborn/_marks/area.py", "line": 168, "name": "reset_index", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 27, "name": "BarBase", "kind": "def", "category": "class", "info": "_make_patches\t_resolve_properties\t_legend_artist"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 29, "name": "_make_patches", "kind": "def", "category": "function", "info": " def _make_patches(self, data, scales, orient):\n\n transform = scales[orient]._matplotlib_scale.get_transform()\n forward = transform.transform\n reverse = transform.inverted().transform\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n\n pos = reverse(forward(data[orient]) - data[\"width\"] / 2)\n width = reverse(forward(data[orient]) + data[\"width\"] / 2) - pos\n\n val = (data[other] - data[\"baseline\"]).to_numpy()\n base = data[\"baseline\"].to_numpy()\n\n kws = self._resolve_properties(data, scales)\n if orient == \"x\":\n kws.update(x=pos, y=base, w=width, h=val)\n else:\n kws.update(x=base, y=pos, w=val, h=width)\n\n kws.pop(\"width\", None)\n kws.pop(\"baseline\", None)\n\n val_dim = {\"x\": \"h\", \"y\": \"w\"}[orient]\n bars, vals = [], []\n\n for i in range(len(data)):\n\n row = {k: v[i] for k, v in kws.items()}\n\n # Skip bars with no value. 
It's possible we'll want to make this\n            # an option (i.e. so you have an artist for animating or annotating),\n            # but let's keep things simple for now.\n            if not np.nan_to_num(row[val_dim]):\n                continue\n\n            bar = mpl.patches.Rectangle(\n                xy=(row[\"x\"], row[\"y\"]),\n                width=row[\"w\"],\n                height=row[\"h\"],\n                facecolor=row[\"facecolor\"],\n                edgecolor=row[\"edgecolor\"],\n                linestyle=row[\"edgestyle\"],\n                linewidth=row[\"edgewidth\"],\n                **self.artist_kws,\n            )\n            bars.append(bar)\n            vals.append(row[val_dim])\n\n        return bars, vals\n\n    def _resolve_properties(self, data, scales):\n\n        resolved = resolve_properties(self, data, scales)\n\n        resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n        resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n        fc = resolved[\"facecolor\"]\n        if isinstance(fc, tuple):\n            resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n        else:\n            fc[:, 3] = fc[:, 3] * resolved[\"fill\"]  # TODO Is inplace mod a problem?\n            resolved[\"facecolor\"] = fc\n\n        return resolved\n\n    def _legend_artist(\n        self, variables: list[str], value: Any, scales: dict[str, Scale],\n    ) -> Artist:\n        # TODO return some sensible default?\n        key = {v: value for v in variables}\n        key = self._resolve_properties(key, scales)\n        artist = mpl.patches.Patch(\n            facecolor=key[\"facecolor\"],\n            edgecolor=key[\"edgecolor\"],\n            linewidth=key[\"edgewidth\"],\n            linestyle=key[\"edgestyle\"],\n        )\n        return artist\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 33, "name": "inverted", "kind": "ref", "category": "function", "info": "        reverse = transform.inverted().transform\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 37, "name": "forward", "kind": "ref", "category": "function", "info": "        pos = reverse(forward(data[orient]) - data[\"width\"] / 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 38, "name": "forward", "kind": "ref", "category": "function", "info": "        width = reverse(forward(data[orient]) + data[\"width\"] / 2) - pos\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 40, "name": "to_numpy", "kind": "ref", "category": "function", "info": "        val = (data[other] - data[\"baseline\"]).to_numpy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 41, "name": "to_numpy", "kind": "ref", "category": "function", "info": "        base = data[\"baseline\"].to_numpy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 43, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": "        kws = self._resolve_properties(data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 65, "name": "Rectangle", "kind": "ref", "category": "function", "info": "            bar = mpl.patches.Rectangle(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 80, "name": "_resolve_properties", "kind": "def", "category": "function", "info": "    def 
_resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n\n resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n\n fc = resolved[\"facecolor\"]\n if isinstance(fc, tuple):\n resolved[\"facecolor\"] = fc[0], fc[1], fc[2], fc[3] * resolved[\"fill\"]\n else:\n fc[:, 3] = fc[:, 3] * resolved[\"fill\"] # TODO Is inplace mod a problem?\n resolved[\"facecolor\"] = fc\n\n return resolved\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 82, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 84, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 85, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 96, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n # TODO return some sensible default?\n key = {v: value for v in variables}\n key = self._resolve_properties(key, scales)\n artist = mpl.patches.Patch(\n facecolor=key[\"facecolor\"],\n edgecolor=key[\"edgecolor\"],\n linewidth=key[\"edgewidth\"],\n linestyle=key[\"edgestyle\"],\n )\n return artist\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 101, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " key = self._resolve_properties(key, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 102, "name": "Patch", "kind": "ref", "category": "function", "info": " artist = mpl.patches.Patch(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 113, "name": "Bar", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 126, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 127, "name": 
"Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 128, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 129, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 130, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 131, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"patch.linewidth\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 132, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 135, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(.8, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 136, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this mappable?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 138, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n val_idx = [\"y\", \"x\"].index(orient)\n\n for _, data, ax in split_gen():\n\n bars, vals = self._make_patches(data, scales, orient)\n\n for bar in bars:\n\n # Because we are clipping the artist (see below), the edges end up\n # looking half as wide as they actually are. I don't love this clumsy\n # workaround, which is going to cause surprises if you work with the\n # artists directly. We may need to revisit after feedback.\n bar.set_linewidth(bar.get_linewidth() * 2)\n linestyle = bar.get_linestyle()\n if linestyle[1]:\n linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))\n bar.set_linestyle(linestyle)\n\n # This is a bit of a hack to handle the fact that the edge lines are\n # centered on the actual extents of the bar, and overlap when bars are\n # stacked or dodged. We may discover that this causes problems and needs\n # to be revisited at some point. 
Also it should be faster to clip with\n                # a bbox than a path, but I can't work out how to get the intersection\n                # with the axes bbox.\n                bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n                if self.artist_kws.get(\"clip_on\", True):\n                    # It seems the above hack undoes the default axes clipping\n                    bar.set_clip_box(ax.bbox)\n                bar.sticky_edges[val_idx][:] = (0, np.inf)\n                ax.add_patch(bar)\n\n            # Add a container which is useful for, e.g. Axes.bar_label\n            if _version_predates(mpl, \"3.4\"):\n                container_kws = {}\n            else:\n                orientation = {\"x\": \"vertical\", \"y\": \"horizontal\"}[orient]\n                container_kws = dict(datavalues=vals, orientation=orientation)\n            container = mpl.container.BarContainer(bars, **container_kws)\n            ax.add_container(container)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 142, "name": "split_gen", "kind": "ref", "category": "function", "info": "        for _, data, ax in split_gen():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 144, "name": "_make_patches", "kind": "ref", "category": "function", "info": "            bars, vals = self._make_patches(data, scales, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 152, "name": "set_linewidth", "kind": "ref", "category": "function", "info": "                bar.set_linewidth(bar.get_linewidth() * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 152, "name": "get_linewidth", "kind": "ref", "category": "function", "info": "                bar.set_linewidth(bar.get_linewidth() * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 153, "name": "get_linestyle", "kind": "ref", "category": "function", "info": "                linestyle = bar.get_linestyle()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 156, "name": "set_linestyle", "kind": "ref", "category": "function", "info": "                bar.set_linestyle(linestyle)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 164, "name": "get_path", "kind": "ref", "category": "function", "info": "                bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 169, "name": "add_patch", "kind": "ref", "category": "function", "info": "                ax.add_patch(bar)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 172, "name": "_version_predates", "kind": "ref", "category": "function", "info": "            if _version_predates(mpl, \"3.4\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 177, "name": "BarContainer", "kind": "ref", "category": "function", "info": "            container = mpl.container.BarContainer(bars, **container_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 178, "name": 
"add_container", "kind": "ref", "category": "function", "info": " ax.add_container(container)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 183, "name": "Bars", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 196, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 197, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(.7, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 198, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 199, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(rc=\"patch.edgecolor\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 200, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 201, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(auto=True, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 202, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 205, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 206, "name": "Mappable", "kind": "ref", "category": "function", "info": " baseline: MappableFloat = Mappable(0, grouping=False) # TODO *is* this mappable?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 208, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n val_idx = [\"y\", \"x\"].index(orient)\n\n for _, data, ax in split_gen():\n\n bars, vals = self._make_patches(data, scales, orient)\n\n for bar in bars:\n\n # Because we are clipping the artist (see below), the edges end up\n # looking half as wide as they actually are. I don't love this clumsy\n # workaround, which is going to cause surprises if you work with the\n # artists directly. 
We may need to revisit after feedback.\n                bar.set_linewidth(bar.get_linewidth() * 2)\n                linestyle = bar.get_linestyle()\n                if linestyle[1]:\n                    linestyle = (linestyle[0], tuple(x / 2 for x in linestyle[1]))\n                bar.set_linestyle(linestyle)\n\n                # This is a bit of a hack to handle the fact that the edge lines are\n                # centered on the actual extents of the bar, and overlap when bars are\n                # stacked or dodged. We may discover that this causes problems and needs\n                # to be revisited at some point. Also it should be faster to clip with\n                # a bbox than a path, but I can't work out how to get the intersection\n                # with the axes bbox.\n                bar.set_clip_path(bar.get_path(), bar.get_transform() + ax.transData)\n                if self.artist_kws.get(\"clip_on\", True):\n                    # It seems the above hack undoes the default axes clipping\n                    bar.set_clip_box(ax.bbox)\n                bar.sticky_edges[val_idx][:] = (0, np.inf)\n                ax.add_patch(bar)\n\n            # Add a container which is useful for, e.g. Axes.bar_label\n            if _version_predates(mpl, \"3.4\"):\n                container_kws = {}\n            else:\n                orientation = {\"x\": \"vertical\", \"y\": \"horizontal\"}[orient]\n                container_kws = dict(datavalues=vals, orientation=orientation)\n            container = mpl.container.BarContainer(bars, **container_kws)\n            ax.add_container(container)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 214, "name": "split_gen", "kind": "ref", "category": "function", "info": "        for _, data, ax in split_gen():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 215, "name": "_make_patches", "kind": "ref", "category": "function", "info": "            bars, _ = self._make_patches(data, scales, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 221, "name": "PatchCollection", "kind": "ref", "category": "function", "info": "            col = mpl.collections.PatchCollection(ax_patches, match_original=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 223, "name": "add_collection", "kind": "ref", "category": "function", "info": "            ax.add_collection(col, autolim=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 229, "name": "get_paths", "kind": "ref", "category": "function", "info": "            xys = np.vstack([path.vertices for path in col.get_paths()])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 230, "name": "update_datalim", "kind": "ref", "category": "function", "info": "            ax.update_datalim(xys)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 235, "name": "autoscale_view", "kind": "ref", "category": "function", "info": "            ax.autoscale_view()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 237, "name": "get_dimensions", "kind": "def", "category": "function", "info": "        def get_dimensions(collection):\n            edges, widths = [], []\n            for verts in (path.vertices for path in collection.get_paths()):\n                edges.append(min(verts[:, ori_idx]))\n                widths.append(np.ptp(verts[:, ori_idx]))\n            return np.array(edges), 
np.array(widths)\n\n min_width = np.inf\n for ax, col in collections.items():\n edges, widths = get_dimensions(col)\n points = 72 / ax.figure.dpi * abs(\n ax.transData.transform([edges + widths] * 2)\n - ax.transData.transform([edges] * 2)\n )\n min_width = min(min_width, min(points[:, ori_idx]))\n\n linewidth = min(.1 * min_width, mpl.rcParams[\"patch.linewidth\"])\n for _, col in collections.items():\n col.set_linewidth(linewidth)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 239, "name": "get_paths", "kind": "ref", "category": "function", "info": " for verts in (path.vertices for path in collection.get_paths()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 246, "name": "get_dimensions", "kind": "ref", "category": "function", "info": " edges, widths = get_dimensions(col)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 248, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([edges + widths] * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 249, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([edges] * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/bar.py", "rel_fname": "seaborn/_marks/bar.py", "line": 255, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " col.set_linewidth(linewidth)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 25, "name": "Mappable", "kind": "def", "category": "class", "info": "__init__\t__repr__\tdepend\tgrouping\tdefault"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 77, "name": "depend", "kind": "def", "category": "function", "info": " def depend(self) -> Any:\n \"\"\"Return the name of the feature to source a default value from.\"\"\"\n return self._depend\n\n @property\n def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 82, "name": "grouping", "kind": "def", "category": "function", "info": " def grouping(self) -> bool:\n return self._grouping\n\n @property\n def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 86, "name": "default", "kind": "def", "category": "function", "info": " def default(self) -> Any:\n \"\"\"Get the default value for this feature, or access the relevant rcParam.\"\"\"\n if self._val is not None:\n return self._val\n return mpl.rcParams.get(self._rc)\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 103, "name": "Mark", "kind": "def", "category": "class", "info": "_mappable_props\t_grouping_props\t_resolve\t_infer_orient\t_plot\t_legend_artist"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 109, "name": "_mappable_props", "kind": "def", "category": "function", "info": " def _mappable_props(self):\n return {\n f.name: getattr(self, f.name) for f in fields(self)\n if isinstance(f.default, Mappable)\n }\n\n @property\n def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would extender every need to call directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n scale = scales[name]\n value = data[name]\n try:\n feature = scale(value)\n except Exception as err:\n raise PlotSpecError._during(\"Scaling operation\", name) from err\n\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. 
Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 116, "name": "_grouping_props", "kind": "def", "category": "function", "info": " def _grouping_props(self):\n # TODO does it make sense to have variation within a Mark's\n # properties about whether they are grouping?\n return [\n f.name for f in fields(self)\n if isinstance(f.default, Mappable) and f.default.grouping\n ]\n\n # TODO make this method private? Would extender every need to call directly?\n def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n scale = scales[name]\n value = data[name]\n try:\n feature = scale(value)\n except Exception as err:\n raise PlotSpecError._during(\"Scaling operation\", name) from err\n\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. 
set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 125, "name": "_resolve", "kind": "def", "category": "function", "info": " def _resolve(\n self,\n data: DataFrame | dict[str, Any],\n name: str,\n scales: dict[str, Scale] | None = None,\n ) -> Any:\n \"\"\"Obtain default, specified, or mapped value for a named feature.\n\n Parameters\n ----------\n data : DataFrame or dict with scalar values\n Container with data values for features that will be semantically mapped.\n name : string\n Identity of the feature / semantic.\n scales: dict\n Mapping from variable to corresponding scale object.\n\n Returns\n -------\n value or array of values\n Outer return type depends on whether `data` is a dict (implying that\n we want a single value) or DataFrame (implying that we want an array\n of values with matching length).\n\n \"\"\"\n feature = self._mappable_props[name]\n prop = PROPERTIES.get(name, Property(name))\n directly_specified = not isinstance(feature, Mappable)\n return_multiple = isinstance(data, pd.DataFrame)\n return_array = return_multiple and not name.endswith(\"style\")\n\n # Special case width because it needs to be resolved and added to the dataframe\n # during layer prep (so the Move operations use it properly).\n # TODO how does width *scaling* work, e.g. for violin width by count?\n if name == \"width\":\n directly_specified = directly_specified and name not in data\n\n if directly_specified:\n feature = prop.standardize(feature)\n if return_multiple:\n feature = [feature] * len(data)\n if return_array:\n feature = np.array(feature)\n return feature\n\n if name in data:\n if scales is None or name not in scales:\n # TODO Might this obviate the identity scale? Just don't add a scale?\n feature = data[name]\n else:\n scale = scales[name]\n value = data[name]\n try:\n feature = scale(value)\n except Exception as err:\n raise PlotSpecError._during(\"Scaling operation\", name) from err\n\n if return_array:\n feature = np.asarray(feature)\n return feature\n\n if feature.depend is not None:\n # TODO add source_func or similar to transform the source value?\n # e.g. 
set linewidth as a proportion of pointsize?\n return self._resolve(data, feature.depend, scales)\n\n default = prop.standardize(feature.default)\n if return_multiple:\n default = [default] * len(data)\n if return_array:\n default = np.array(default)\n return default\n\n def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 151, "name": "Property", "kind": "ref", "category": "function", "info": " prop = PROPERTIES.get(name, Property(name))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 163, "name": "standardize", "kind": "ref", "category": "function", "info": " feature = prop.standardize(feature)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 178, "name": "scale", "kind": "ref", "category": "function", "info": " feature = scale(value)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 180, "name": "_during", "kind": "ref", "category": "function", "info": " raise PlotSpecError._during(\"Scaling operation\", name) from err\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 189, "name": "_resolve", "kind": "ref", "category": "function", "info": " return self._resolve(data, feature.depend, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 191, "name": "standardize", "kind": "ref", "category": "function", "info": " default = prop.standardize(feature.default)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 198, "name": "_infer_orient", "kind": "def", "category": "function", "info": " def _infer_orient(self, scales: dict) -> str: # TODO type scales\n\n # TODO The original version of this (in seaborn._oldcore) did more checking.\n # Paring that down here for the prototype to see what restrictions make sense.\n\n # TODO rethink this to map from scale type to \"DV priority\" and use that?\n # e.g. 
Nominal > Discrete > Continuous\n\n x = 0 if \"x\" not in scales else scales[\"x\"]._priority\n y = 0 if \"y\" not in scales else scales[\"y\"]._priority\n\n if y > x:\n return \"y\"\n else:\n return \"x\"\n\n def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 214, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(\n self,\n split_generator: Callable[[], Generator],\n scales: dict[str, Scale],\n orient: str,\n ) -> None:\n \"\"\"Main interface for creating a plot.\"\"\"\n raise NotImplementedError()\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 223, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 230, "name": "resolve_properties", "kind": "def", "category": "function", "info": "def resolve_properties(\n mark: Mark, data: DataFrame, scales: dict[str, Scale]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 235, "name": "_resolve", "kind": "ref", "category": "function", "info": " name: mark._resolve(data, name, scales) for name in mark._mappable_props\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 240, "name": "resolve_color", "kind": "def", "category": "function", "info": "def resolve_color(\n mark: Mark,\n data: DataFrame | dict,\n prefix: str = \"\",\n scales: dict[str, Scale] | None = None,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 266, "name": "_resolve", "kind": "ref", "category": "function", "info": " color = mark._resolve(data, f\"{prefix}color\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 269, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, f\"{prefix}alpha\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 271, "name": "_resolve", "kind": "ref", "category": "function", "info": " alpha = mark._resolve(data, \"alpha\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 273, "name": "visible", "kind": "def", "category": "function", "info": " def visible(x, axis=None):\n \"\"\"Detect \"invisible\" colors to set alpha appropriately.\"\"\"\n # TODO First clause only needed to handle non-rgba 
arrays,\n # which we are trying to handle upstream\n return np.array(x).dtype.kind != \"f\" or np.isfinite(x).all(axis)\n\n # Second check here catches vectors of strings with identity scale\n # It could probably be handled better upstream. This is a tricky problem\n if np.ndim(color) < 2 and all(isinstance(x, float) for x in color):\n if len(color) == 4:\n return mpl.colors.to_rgba(color)\n alpha = alpha if visible(color) else np.nan\n return mpl.colors.to_rgba(color, alpha)\n else:\n if np.ndim(color) == 2 and color.shape[1] == 4:\n return mpl.colors.to_rgba_array(color)\n alpha = np.where(visible(color, axis=1), alpha, np.nan)\n return mpl.colors.to_rgba_array(color, alpha)\n\n # TODO should we be implementing fill here too?\n # (i.e. set fillalpha to 0 when fill=False)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 283, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 284, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = alpha if visible(color) else np.nan\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 285, "name": "to_rgba", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba(color, alpha)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 288, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 289, "name": "visible", "kind": "ref", "category": "function", "info": " alpha = np.where(visible(color, axis=1), alpha, np.nan)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 290, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgba_array(color, alpha)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/base.py", "rel_fname": "seaborn/_marks/base.py", "line": 296, "name": "document_properties", "kind": "def", "category": "function", "info": "def document_properties(mark):\n\n properties = [f.name for f in fields(mark) if isinstance(f.default, Mappable)]\n text = [\n \"\",\n \" This mark defines the following properties:\",\n textwrap.fill(\n \", \".join([f\"|{p}|\" for p in properties]),\n width=78, initial_indent=\" \" * 8, subsequent_indent=\" \" * 8,\n ),\n ]\n\n docstring_lines = mark.__doc__.split(\"\\n\")\n new_docstring = \"\\n\".join([\n *docstring_lines[:2],\n *text,\n *docstring_lines[2:],\n ])\n mark.__doc__ = new_docstring\n return mark\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 26, "name": "DotBase", "kind": "def", "category": "class", "info": "_resolve_paths\t_resolve_properties\t_plot\t_legend_artist"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 28, "name": "_resolve_paths", "kind": 
"def", "category": "function", "info": " def _resolve_paths(self, data):\n\n paths = []\n path_cache = {}\n marker = data[\"marker\"]\n\n def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 34, "name": "get_transformed_path", "kind": "def", "category": "function", "info": " def get_transformed_path(m):\n return m.get_path().transformed(m.get_transform())\n\n if isinstance(marker, mpl.markers.MarkerStyle):\n return get_transformed_path(marker)\n\n for m in marker:\n if m not in path_cache:\n path_cache[m] = get_transformed_path(m)\n paths.append(path_cache[m])\n return paths\n\n def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n 
points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 35, "name": "get_path", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 35, "name": "transformed", "kind": "ref", "category": "function", "info": " return m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 38, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " return get_transformed_path(marker)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 42, "name": "get_transformed_path", "kind": "ref", "category": "function", "info": " path_cache[m] = get_transformed_path(m)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 46, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = 
self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 48, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " resolved = resolve_properties(self, data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 49, "name": "_resolve_paths", "kind": "ref", "category": "function", "info": " resolved[\"path\"] = self._resolve_paths(resolved)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 53, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = resolved[\"marker\"].is_filled()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 55, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 61, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 67, "name": "split_gen", "kind": "ref", "category": "function", "info": " for _, data, ax in split_gen():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 70, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " data = self._resolve_properties(data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": 
"seaborn/_marks/dot.py", "line": 72, "name": "PathCollection", "kind": "ref", "category": "function", "info": " points = mpl.collections.PathCollection(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 81, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 84, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(points)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 86, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 91, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " res = self._resolve_properties(key, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 93, "name": "PathCollection", "kind": "ref", "category": "function", "info": " return mpl.collections.PathCollection(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 100, "name": "IdentityTransform", "kind": "ref", "category": "function", "info": " transform=mpl.transforms.IdentityTransform(),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 107, "name": "Dot", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 120, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(\"o\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 121, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(6, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 122, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 123, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 124, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 125, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 126, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 127, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgealpha: MappableFloat = Mappable(depend=\"alpha\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 128, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(.5, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 129, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgestyle: MappableStyle = Mappable(\"-\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 131, "name": "_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n 
transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 133, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 140, "name": "resolve_color", "kind": "ref", "category": "function", "info": " main_color = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 141, "name": "resolve_color", "kind": "ref", "category": "function", "info": " edge_color = resolve_color(self, data, \"edge\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 161, "name": "Dots", "kind": "def", "category": "class", "info": "_resolve_properties"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 175, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"scatter.marker\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 176, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(4, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 177, "name": "Mappable", "kind": "ref", "category": "function", "info": " stroke: MappableFloat = Mappable(.75, grouping=False) # TODO rcParam?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 178, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 179, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1, grouping=False) # TODO auto alpha?\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 180, "name": "Mappable", "kind": "ref", "category": "function", "info": " fill: MappableBool = Mappable(True, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 181, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\", grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 182, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillalpha: MappableFloat = Mappable(.2, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 184, "name": 
"_resolve_properties", "kind": "def", "category": "function", "info": " def _resolve_properties(self, data, scales):\n\n resolved = resolve_properties(self, data, scales)\n resolved[\"path\"] = self._resolve_paths(resolved)\n resolved[\"size\"] = resolved[\"pointsize\"] ** 2\n\n if isinstance(data, dict): # Properties for single dot\n filled_marker = resolved[\"marker\"].is_filled()\n else:\n filled_marker = [m.is_filled() for m in resolved[\"marker\"]]\n\n resolved[\"fill\"] = resolved[\"fill\"] * filled_marker\n\n return resolved\n\n def _plot(self, split_gen, scales, orient):\n\n # TODO Not backcompat with allowed (but nonfunctional) univariate plots\n # (That should be solved upstream by defaulting to \"\" for unset x/y?)\n # (Be mindful of xmin/xmax, etc!)\n\n for _, data, ax in split_gen():\n\n offsets = np.column_stack([data[\"x\"], data[\"y\"]])\n data = self._resolve_properties(data, scales)\n\n points = mpl.collections.PathCollection(\n offsets=offsets,\n paths=data[\"path\"],\n sizes=data[\"size\"],\n facecolors=data[\"facecolor\"],\n edgecolors=data[\"edgecolor\"],\n linewidths=data[\"linewidth\"],\n linestyles=data[\"edgestyle\"],\n transOffset=ax.transData,\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n ax.add_collection(points)\n\n def _legend_artist(\n self, variables: list[str], value: Any, scales: dict[str, Scale],\n ) -> Artist:\n\n key = {v: value for v in variables}\n res = self._resolve_properties(key, scales)\n\n return mpl.collections.PathCollection(\n paths=[res[\"path\"]],\n sizes=[res[\"size\"]],\n facecolors=[res[\"facecolor\"]],\n edgecolors=[res[\"edgecolor\"]],\n linewidths=[res[\"linewidth\"]],\n linestyles=[res[\"edgestyle\"]],\n transform=mpl.transforms.IdentityTransform(),\n **self.artist_kws,\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 186, "name": "_resolve_properties", "kind": "ref", "category": "function", "info": " resolved = super()._resolve_properties(data, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 188, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"facecolor\"] = resolve_color(self, data, \"fill\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/dot.py", "rel_fname": "seaborn/_marks/dot.py", "line": 189, "name": "resolve_color", "kind": "ref", "category": "function", "info": " resolved[\"edgecolor\"] = resolve_color(self, data, \"\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 35, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 36, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": 
"seaborn/_marks/line.py", "line": 38, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 39, "name": "Mappable", "kind": "ref", "category": "function", "info": " marker: MappableString = Mappable(rc=\"lines.marker\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 40, "name": "Mappable", "kind": "ref", "category": "function", "info": " pointsize: MappableFloat = Mappable(rc=\"lines.markersize\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 41, "name": "Mappable", "kind": "ref", "category": "function", "info": " fillcolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 42, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgecolor: MappableColor = Mappable(depend=\"color\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 43, "name": "Mappable", "kind": "ref", "category": "function", "info": " edgewidth: MappableFloat = Mappable(rc=\"lines.markeredgewidth\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 47, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # 
https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 49, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 51, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 52, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 53, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 54, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 57, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient, kind=\"mergesort\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 60, "name": "_handle_capstyle", "kind": "ref", "category": "function", "info": " self._handle_capstyle(artist_kws, vals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 62, "name": "Line2D", "kind": "ref", "category": "function", "info": " line = mpl.lines.Line2D(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 63, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"x\"].to_numpy(),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 64, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"y\"].to_numpy(),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 75, "name": "add_line", "kind": "ref", "category": "function", "info": " ax.add_line(line)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 77, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, 
scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 80, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 81, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 82, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 83, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 86, "name": "_handle_capstyle", "kind": "ref", "category": "function", "info": " self._handle_capstyle(artist_kws, vals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 88, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 101, "name": "_handle_capstyle", "kind": "def", "category": "function", "info": " def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 112, "name": "Line", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 131, "name": "Paths", "kind": "def", "category": "class", "info": 
"__post_init__\t_plot\t_legend_artist\t_setup_segments"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 144, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"C0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 145, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 146, "name": "Mappable", "kind": "ref", "category": "function", "info": " linewidth: MappableFloat = Mappable(rc=\"lines.linewidth\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 147, "name": "Mappable", "kind": "ref", "category": "function", "info": " linestyle: MappableString = Mappable(rc=\"lines.linestyle\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 151, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n # LineCollection artists have a capstyle property but don't source its value\n # from the rc, so we do that manually here. Unfortunately, because we add\n # only one LineCollection, we have the use the same capstyle for all lines\n # even when they are dashed. It's a slight inconsistency, but looks fine IMO.\n self.artist_kws.setdefault(\"capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n\n def _plot(self, split_gen, scales, orient):\n\n line_data = {}\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n if ax not in line_data:\n line_data[ax] = {\n \"segments\": [],\n \"colors\": [],\n \"linewidths\": [],\n \"linestyles\": [],\n }\n\n segments = self._setup_segments(data, orient)\n line_data[ax][\"segments\"].extend(segments)\n n = len(segments)\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n\n line_data[ax][\"colors\"].extend([vals[\"color\"]] * n)\n line_data[ax][\"linewidths\"].extend([vals[\"linewidth\"]] * n)\n line_data[ax][\"linestyles\"].extend([vals[\"linestyle\"]] * n)\n\n for ax, ax_data in line_data.items():\n lines = mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n # Handle datalim update manually\n # https://github.com/matplotlib/matplotlib/issues/23129\n ax.add_collection(lines, autolim=False)\n if ax_data[\"segments\"]:\n xy = np.concatenate(ax_data[\"segments\"])\n ax.update_datalim(xy)\n\n def _legend_artist(self, variables, value, scales):\n\n key = resolve_properties(self, {v: value for v in variables}, scales)\n\n artist_kws = self.artist_kws.copy()\n capstyle = artist_kws.pop(\"capstyle\")\n artist_kws[\"solid_capstyle\"] = capstyle\n artist_kws[\"dash_capstyle\"] = capstyle\n\n return mpl.lines.Line2D(\n [], [],\n color=key[\"color\"],\n linewidth=key[\"linewidth\"],\n linestyle=key[\"linestyle\"],\n **artist_kws,\n )\n\n def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 159, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n for keys, data, ax in split_gen(keep_na=not self._sort):\n\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n line = mpl.lines.Line2D(\n data[\"x\"].to_numpy(),\n data[\"y\"].to_numpy(),\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n ax.add_line(line)\n\n def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 162, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, data, ax in split_gen(keep_na=not self._sort):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 172, "name": "_setup_segments", "kind": "ref", "category": "function", "info": " segments = self._setup_segments(data, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 176, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 177, "name": "resolve_color", "kind": "ref", "category": "function", "info": " vals[\"color\"] = resolve_color(self, keys, scales=scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 184, "name": "LineCollection", "kind": "ref", "category": "function", "info": " lines = 
mpl.collections.LineCollection(**ax_data, **self.artist_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 187, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines, autolim=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 190, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xy)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 192, "name": "_legend_artist", "kind": "def", "category": "function", "info": " def _legend_artist(self, variables, value, scales):\n\n keys = {v: value for v in variables}\n vals = resolve_properties(self, keys, scales)\n vals[\"color\"] = resolve_color(self, keys, scales=scales)\n vals[\"fillcolor\"] = resolve_color(self, keys, prefix=\"fill\", scales=scales)\n vals[\"edgecolor\"] = resolve_color(self, keys, prefix=\"edge\", scales=scales)\n\n artist_kws = self.artist_kws.copy()\n self._handle_capstyle(artist_kws, vals)\n\n return mpl.lines.Line2D(\n [], [],\n color=vals[\"color\"],\n linewidth=vals[\"linewidth\"],\n linestyle=vals[\"linestyle\"],\n marker=vals[\"marker\"],\n markersize=vals[\"pointsize\"],\n markerfacecolor=vals[\"fillcolor\"],\n markeredgecolor=vals[\"edgecolor\"],\n markeredgewidth=vals[\"edgewidth\"],\n **artist_kws,\n )\n\n def _handle_capstyle(self, kws, vals):\n\n # Work around for this matplotlib issue:\n # https://github.com/matplotlib/matplotlib/issues/23437\n if vals[\"linestyle\"][1] is None:\n capstyle = kws.get(\"solid_capstyle\", mpl.rcParams[\"lines.solid_capstyle\"])\n kws[\"dash_capstyle\"] = capstyle\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 194, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " key = resolve_properties(self, {v: value for v in variables}, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 201, "name": "Line2D", "kind": "ref", "category": "function", "info": " return mpl.lines.Line2D(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 209, "name": "_setup_segments", "kind": "def", "category": "function", "info": " def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 212, "name": "sort_values", "kind": "ref", "category": "function", "info": " data = data.sort_values(orient, kind=\"mergesort\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 222, "name": "Lines", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 240, "name": "Range", "kind": "def", "category": 
"class", "info": "_setup_segments"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 249, "name": "_setup_segments", "kind": "def", "category": "function", "info": " def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 256, "name": "groupby", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 256, "name": "agg", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 256, "name": "reset_index", "kind": "ref", "category": "function", "info": " data = data.groupby(orient).agg(**agg).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 259, "name": "melt", "kind": "ref", "category": "function", "info": " data = data[cols].melt(orient, value_name=val)[[\"x\", \"y\"]]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 260, "name": "to_numpy", "kind": "ref", "category": "function", "info": " segments = [d.to_numpy() for _, d in data.groupby(orient)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 260, "name": "groupby", "kind": "ref", "category": "function", "info": " segments = [d.to_numpy() for _, d in data.groupby(orient)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 266, "name": "Dash", "kind": "def", "category": "class", "info": "_setup_segments"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 275, "name": "Mappable", "kind": "ref", "category": "function", "info": " width: MappableFloat = Mappable(.8, grouping=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 277, "name": "_setup_segments", "kind": "def", "category": "function", "info": " def _setup_segments(self, data, orient):\n\n if self._sort:\n data = data.sort_values(orient, kind=\"mergesort\")\n\n # Column stack to avoid block consolidation\n xy = np.column_stack([data[\"x\"], data[\"y\"]])\n\n return [xy]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 280, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[\"x\", \"y\"]].to_numpy().astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/line.py", "rel_fname": "seaborn/_marks/line.py", "line": 280, "name": "astype", "kind": "ref", "category": 
"function", "info": " xys = data[[\"x\", \"y\"]].to_numpy().astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 22, "name": "Text", "kind": "def", "category": "class", "info": "_plot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 31, "name": "Mappable", "kind": "ref", "category": "function", "info": " text: MappableString = Mappable(\"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 32, "name": "Mappable", "kind": "ref", "category": "function", "info": " color: MappableColor = Mappable(\"k\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 33, "name": "Mappable", "kind": "ref", "category": "function", "info": " alpha: MappableFloat = Mappable(1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 34, "name": "Mappable", "kind": "ref", "category": "function", "info": " fontsize: MappableFloat = Mappable(rc=\"font.size\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 35, "name": "Mappable", "kind": "ref", "category": "function", "info": " halign: MappableString = Mappable(\"center\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 36, "name": "Mappable", "kind": "ref", "category": "function", "info": " valign: MappableString = Mappable(\"center_baseline\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 37, "name": "Mappable", "kind": "ref", "category": "function", "info": " offset: MappableFloat = Mappable(4)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 39, "name": "_plot", "kind": "def", "category": "function", "info": " def _plot(self, split_gen, scales, orient):\n\n ax_data = defaultdict(list)\n\n for keys, data, ax in split_gen():\n\n vals = resolve_properties(self, keys, scales)\n color = resolve_color(self, keys, \"\", scales)\n\n halign = vals[\"halign\"]\n valign = vals[\"valign\"]\n fontsize = vals[\"fontsize\"]\n offset = vals[\"offset\"] / 72\n\n offset_trans = ScaledTranslation(\n {\"right\": -offset, \"left\": +offset}.get(halign, 0),\n {\"top\": -offset, \"bottom\": +offset, \"baseline\": +offset}.get(valign, 0),\n ax.figure.dpi_scale_trans,\n )\n\n for row in data.to_dict(\"records\"):\n artist = mpl.text.Text(\n x=row[\"x\"],\n y=row[\"y\"],\n text=str(row.get(\"text\", vals[\"text\"])),\n color=color,\n fontsize=fontsize,\n horizontalalignment=halign,\n verticalalignment=valign,\n transform=ax.transData + offset_trans,\n **self.artist_kws,\n )\n ax.add_artist(artist)\n ax_data[ax].append([row[\"x\"], row[\"y\"]])\n\n for ax, ax_vals in ax_data.items():\n ax.update_datalim(np.array(ax_vals))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 43, "name": "split_gen", "kind": "ref", "category": "function", "info": " for keys, 
data, ax in split_gen():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 45, "name": "resolve_properties", "kind": "ref", "category": "function", "info": " vals = resolve_properties(self, keys, scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 46, "name": "resolve_color", "kind": "ref", "category": "function", "info": " color = resolve_color(self, keys, \"\", scales)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 59, "name": "to_dict", "kind": "ref", "category": "function", "info": " for row in data.to_dict(\"records\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 60, "name": "Text", "kind": "ref", "category": "function", "info": " artist = mpl.text.Text(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 71, "name": "add_artist", "kind": "ref", "category": "function", "info": " ax.add_artist(artist)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_marks/text.py", "rel_fname": "seaborn/_marks/text.py", "line": 75, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(np.array(ax_vals))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 28, "name": "SemanticMapping", "kind": "def", "category": "class", "info": "__init__\tmap\t_check_list_length\t_lookup_single\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 54, "name": "cls", "kind": "ref", "category": "function", "info": " setattr(plotter, method_name, cls(plotter, *args, **kwargs))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 57, "name": "_check_list_length", "kind": "def", "category": "function", "info": " def _check_list_length(self, levels, values, variable):\n \"\"\"Input check when values are provided as a list.\"\"\"\n # Copied from _core/properties; eventually will be replaced for that.\n message = \"\"\n if len(levels) > len(values):\n message = \" \".join([\n f\"\\nThe {variable} list has fewer values ({len(values)})\",\n f\"than needed ({len(levels)}) and will cycle, which may\",\n \"produce an uninterpretable plot.\"\n ])\n values = [x for _, x in zip(levels, itertools.cycle(values))]\n\n elif len(values) > len(levels):\n message = \" \".join([\n f\"The {variable} list has more values ({len(values)})\",\n f\"than needed ({len(levels)}), which may not be intended.\",\n ])\n values = values[:len(levels)]\n\n if message:\n warnings.warn(message, UserWarning, stacklevel=6)\n\n return values\n\n def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 81, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 88, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return [self._lookup_single(k, *args, **kwargs) for k in key]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 90, "name": "_lookup_single", "kind": "ref", "category": "function", "info": " return self._lookup_single(key, *args, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 94, "name": "HueMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\tinfer_map_type\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 125, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 138, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, cmap = self.numeric_mapping(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 147, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 156, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 170, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 186, "name": "norm", "kind": "ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 193, "name": "is_masked", "kind": "ref", "category": 
"function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 195, "name": "cmap", "kind": "ref", "category": "function", "info": " value = self.cmap(normed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 198, "name": "desaturate", "kind": "ref", "category": "function", "info": " value = desaturate(value, self.saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 202, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, palette, norm, input_format, var_type):\n \"\"\"Determine how to implement the mapping.\"\"\"\n if palette in QUAL_PALETTES:\n map_type = \"categorical\"\n elif norm is not None:\n map_type = \"numeric\"\n elif isinstance(palette, (dict, list)):\n map_type = \"categorical\"\n elif input_format == \"wide\":\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n colors = self._check_list_length(levels, palette, \"palette\")\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 217, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, palette, order):\n \"\"\"Determine colors when the hue mapping is categorical.\"\"\"\n # -- Identify the order and name of the levels\n\n levels = categorical_order(data, order)\n n_colors = len(levels)\n\n # -- Identify the set of colors to use\n\n if isinstance(palette, dict):\n\n missing = set(levels) - set(palette)\n if any(missing):\n err = \"The palette dictionary is missing keys: {}\"\n raise ValueError(err.format(missing))\n\n lookup_table = palette\n\n else:\n\n if palette is None:\n if n_colors <= len(get_color_cycle()):\n colors = color_palette(None, n_colors)\n else:\n colors = color_palette(\"husl\", n_colors)\n elif isinstance(palette, list):\n colors = self._check_list_length(levels, palette, \"palette\")\n else:\n colors = color_palette(palette, n_colors)\n\n lookup_table = dict(zip(levels, colors))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 221, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 238, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " if n_colors <= len(get_color_cycle()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 239, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(None, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 241, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 243, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " colors = self._check_list_length(levels, palette, \"palette\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 245, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 251, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def numeric_mapping(self, data, palette, norm):\n \"\"\"Determine colors when the hue variable is quantitative.\"\"\"\n if isinstance(palette, dict):\n\n # The presence of a norm object overrides a dictionary of hues\n # in specifying a numeric mapping, so we need to process it here.\n levels = list(sorted(palette))\n colors = [palette[k] for k in sorted(palette)]\n cmap = mpl.colors.ListedColormap(colors)\n lookup_table = palette.copy()\n\n else:\n\n # The levels are the sorted unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n # --- Sort out the colormap to use from the palette argument\n\n # Default numeric palette is our default cubehelix palette\n # TODO do we want to do something complicated to ensure contrast?\n palette = \"ch:\" if palette is None else palette\n\n if isinstance(palette, mpl.colors.Colormap):\n cmap = palette\n else:\n cmap = color_palette(palette, as_cmap=True)\n\n # Now sort out the data normalization\n if norm is None:\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = \"``hue_norm`` must be None, tuple, or Normalize object.\"\n raise ValueError(err)\n\n if not norm.scaled():\n norm(np.asarray(data.dropna()))\n\n lookup_table = dict(zip(levels, cmap(norm(levels))))\n\n return levels, lookup_table, norm, cmap\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 259, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 265, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 276, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(palette, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 280, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 282, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", 
"line": 287, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 288, "name": "norm", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 288, "name": "dropna", "kind": "ref", "category": "function", "info": " norm(np.asarray(data.dropna()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 290, "name": "cmap", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 290, "name": "norm", "kind": "ref", "category": "function", "info": " lookup_table = dict(zip(levels, cmap(norm(levels))))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 296, "name": "SizeMapping", "kind": "def", "category": "class", "info": "__init__\tinfer_map_type\t_lookup_single\tcategorical_mapping\tnumeric_mapping"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 317, "name": "infer_map_type", "kind": "ref", "category": "function", "info": " map_type = self.infer_map_type(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 325, "name": "numeric_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table, norm, size_range = self.numeric_mapping(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 333, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 343, "name": "categorical_mapping", "kind": "ref", "category": "function", "info": " levels, lookup_table = self.categorical_mapping(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 357, "name": "infer_map_type", "kind": "def", "category": "function", "info": " def infer_map_type(self, norm, sizes, var_type):\n\n if norm is not None:\n map_type = \"numeric\"\n elif isinstance(sizes, (dict, list)):\n map_type = \"categorical\"\n else:\n map_type = var_type\n\n return map_type\n\n def _lookup_single(self, key):\n\n try:\n value = self.lookup_table[key]\n except KeyError:\n normed = self.norm(key)\n if np.ma.is_masked(normed):\n normed = np.nan\n value = self.size_range[0] + normed * np.ptp(self.size_range)\n return value\n\n def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, 
list):\n\n # List inputs give size values in the same order as the levels\n sizes = self._check_list_length(levels, sizes, \"sizes\")\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter is generic\n # across the visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. This is because \"ordered\" categories\n # are often thought to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process the\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. 
It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 368, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key):\n \"\"\"Apply the mapping to a single data value.\"\"\"\n return self.lookup_table[key]\n\n def __call__(self, key, *args, **kwargs):\n \"\"\"Get the attribute(s) values for the data key.\"\"\"\n if isinstance(key, (list, np.ndarray, pd.Series)):\n return [self._lookup_single(k, *args, **kwargs) for k in key]\n else:\n return self._lookup_single(key, *args, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 373, "name": "norm", "kind": "ref", "category": "function", "info": " normed = self.norm(key)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 374, "name": "is_masked", "kind": "ref", "category": "function", "info": " if np.ma.is_masked(normed):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 379, "name": "categorical_mapping", "kind": "def", "category": "function", "info": " def categorical_mapping(self, data, sizes, order):\n\n levels = categorical_order(data, order)\n\n if isinstance(sizes, dict):\n\n # Dict inputs map existing data values to the size attribute\n missing = set(levels) - set(sizes)\n if any(missing):\n err = f\"Missing sizes for the following levels: {missing}\"\n raise ValueError(err)\n lookup_table = sizes.copy()\n\n elif isinstance(sizes, list):\n\n # List inputs give size values in the same order as the levels\n sizes = self._check_list_length(levels, sizes, \"sizes\")\n lookup_table = dict(zip(levels, sizes))\n\n else:\n\n if isinstance(sizes, tuple):\n\n # Tuple input sets the min, max size values\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, we need to get the min, max size values from\n # the plotter object we are attached to.\n\n # TODO this is going to cause us trouble later, because we\n # want to restructure things so that the plotter is generic\n # across the 
visual representation of the data. But at this\n # point, we don't know the visual representation. Likely we\n # want to change the logic of this Mapping so that it gives\n # points on a normalized range that then gets un-normalized\n # when we know what we're drawing. But given the way the\n # package works now, this way is cleanest.\n sizes = self.plotter._default_size_range\n\n # For categorical sizes, use regularly-spaced linear steps\n # between the minimum and maximum sizes. Then reverse the\n # ramp so that the largest value is used for the first entry\n # in size_order, etc. This is because \"ordered\" categories\n # are often thought to go in decreasing priority.\n sizes = np.linspace(*sizes, len(levels))[::-1]\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table\n\n def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process the\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. 
It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 381, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 395, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " sizes = self._check_list_length(levels, sizes, \"sizes\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 437, "name": "numeric_mapping", "kind": "def", "category": "function", "info": " def numeric_mapping(self, data, sizes, norm):\n\n if isinstance(sizes, dict):\n # The presence of a norm object overrides a dictionary of sizes\n # in specifying a numeric mapping, so we need to process the\n # dictionary here\n levels = list(np.sort(list(sizes)))\n size_values = sizes.values()\n size_range = min(size_values), max(size_values)\n\n else:\n\n # The levels here will be the unique values in the data\n levels = list(np.sort(remove_na(data.unique())))\n\n if isinstance(sizes, tuple):\n\n # For numeric inputs, the size can be parametrized by\n # the minimum and maximum artist values to map to. The\n # norm object that gets set up next specifies how to\n # do the mapping.\n\n if len(sizes) != 2:\n err = \"A `sizes` tuple must have only 2 values\"\n raise ValueError(err)\n\n size_range = sizes\n\n elif sizes is not None:\n\n err = f\"Value for `sizes` not understood: {sizes}\"\n raise ValueError(err)\n\n else:\n\n # When not provided, we get the size range from the plotter\n # object we are attached to. See the note in the categorical\n # method about how this is suboptimal for future development.\n size_range = self.plotter._default_size_range\n\n # Now that we know the minimum and maximum sizes that will get drawn,\n # we need to map the data values that we have into that range. We will\n # use a matplotlib Normalize class, which is typically used for numeric\n # color mapping but works fine here too. 
It takes data values and maps\n # them into a [0, 1] interval, potentially nonlinear-ly.\n\n if norm is None:\n # Default is a linear function between the min and max data values\n norm = mpl.colors.Normalize()\n elif isinstance(norm, tuple):\n # It is also possible to give different limits in data space\n norm = mpl.colors.Normalize(*norm)\n elif not isinstance(norm, mpl.colors.Normalize):\n err = f\"Value for size `norm` parameter not understood: {norm}\"\n raise ValueError(err)\n else:\n # If provided with Normalize object, copy it so we can modify\n norm = copy(norm)\n\n # Set the mapping so all output values are in [0, 1]\n norm.clip = True\n\n # If the input range is not set, use the full range of the data\n if not norm.scaled():\n norm(levels)\n\n # Map from data values to [0, 1] range\n sizes_scaled = norm(levels)\n\n # Now map from the scaled range into the artist units\n if isinstance(sizes, dict):\n lookup_table = sizes\n else:\n lo, hi = size_range\n sizes = lo + sizes_scaled * (hi - lo)\n lookup_table = dict(zip(levels, sizes))\n\n return levels, lookup_table, norm, size_range\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 450, "name": "remove_na", "kind": "ref", "category": "function", "info": " levels = list(np.sort(remove_na(data.unique())))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 485, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 488, "name": "Normalize", "kind": "ref", "category": "function", "info": " norm = mpl.colors.Normalize(*norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 500, "name": "scaled", "kind": "ref", "category": "function", "info": " if not norm.scaled():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 501, "name": "norm", "kind": "ref", "category": "function", "info": " norm(levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 504, "name": "norm", "kind": "ref", "category": "function", "info": " sizes_scaled = norm(levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 518, "name": "StyleMapping", "kind": "def", "category": "class", "info": "__init__\t_lookup_single\t_map_attributes"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 541, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data) == \"datetime\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 545, "name": "categorical_order", "kind": "ref", "category": "function", "info": " levels = categorical_order(data, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 547, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " markers = 
self._map_attributes(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 548, "name": "unique_markers", "kind": "ref", "category": "function", "info": " markers, levels, unique_markers(len(levels)), \"markers\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 550, "name": "_map_attributes", "kind": "ref", "category": "function", "info": " dashes = self._map_attributes(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 551, "name": "unique_dashes", "kind": "ref", "category": "function", "info": " dashes, levels, unique_dashes(len(levels)), \"dashes\",\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 559, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 560, "name": "get_path", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 560, "name": "transformed", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 560, "name": "get_transform", "kind": "ref", "category": "function", "info": " paths[k] = m.get_path().transformed(m.get_transform())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 561, "name": "is_filled", "kind": "ref", "category": "function", "info": " filled_markers.append(m.is_filled())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 584, "name": "_lookup_single", "kind": "def", "category": "function", "info": " def _lookup_single(self, key, attr=None):\n \"\"\"Get attribute(s) for a given data point.\"\"\"\n if attr is None:\n value = self.lookup_table[key]\n else:\n value = self.lookup_table[key][attr]\n return value\n\n def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n arg = self._check_list_length(levels, arg, attr)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err = f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 592, "name": "_map_attributes", "kind": "def", "category": "function", "info": " def _map_attributes(self, arg, levels, defaults, attr):\n \"\"\"Handle the specification for a given style 
attribute.\"\"\"\n if arg is True:\n lookup_table = dict(zip(levels, defaults))\n elif isinstance(arg, dict):\n missing = set(levels) - set(arg)\n if missing:\n err = f\"These `{attr}` levels are missing values: {missing}\"\n raise ValueError(err)\n lookup_table = arg\n elif isinstance(arg, Sequence):\n arg = self._check_list_length(levels, arg, attr)\n lookup_table = dict(zip(levels, arg))\n elif arg:\n err = f\"This `{attr}` argument was not understood: {arg}\"\n raise ValueError(err)\n else:\n lookup_table = {}\n\n return lookup_table\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 603, "name": "_check_list_length", "kind": "ref", "category": "function", "info": " arg = self._check_list_length(levels, arg, attr)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 617, "name": "VectorPlotter", "kind": "def", "category": "class", "info": "__init__\tget_semantics\thas_xy_data\tvar_levels\tassign_variables\t_assign_variables_wideform\t_assign_variables_longform\titer_data\tcomp_data\t_get_axes\t_attach\t_log_scaled\t_add_axis_labels\tscale_native\tscale_numeric\tscale_datetime\tscale_categorical"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 644, "name": "assign_variables", "kind": "ref", "category": "function", "info": " self.assign_variables(data, variables)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 656, "name": "get_semantics", "kind": "def", "category": "function", "info": " def get_semantics(cls, kwargs, semantics=None):\n \"\"\"Subset a dictionary of arguments with known semantic variables.\"\"\"\n # TODO this should be get_variables since we have included x and y\n if semantics is None:\n semantics = cls.semantics\n variables = {}\n for key, val in kwargs.items():\n if key in semantics and val is not None:\n variables[key] = val\n return variables\n\n @property\n def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variable levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when a data object is present and a vector can't be matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that is specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed for completeness, but what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converters to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this, artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 668, "name": "has_xy_data", "kind": "def", "category": "function", "info": " def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n return bool({\"x\", \"y\"} & set(self.variables))\n\n @property\n def var_levels(self):\n \"\"\"Property interface to ordered list of variable levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when a data object is present and a vector can't be matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
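For example (an illustrative\n addition, not in the original docstring), log_scale=2 would use\n base 2 rather than base 10.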
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that is specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
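(Editorial clarification, not in\n # the original comment: a \"file\" here is a full row or column of\n # facets, i.e. share_state is \"row\" or \"col\".)\n #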
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed for completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on the given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converters to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
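(An illustrative example, not in\n # the original source: values [10, 2, 1] sort numerically to [1, 2, 10],\n # so the string levels become [\"1\", \"2\", \"10\"] rather than the\n # lexicographic [\"1\", \"10\", \"2\"].)\n #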
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this ensures\n # artists will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 673, "name": "var_levels", "kind": "def", "category": "function", "info": " def var_levels(self):\n \"\"\"Property interface to ordered list of variable levels.\n\n Each time it's accessed, it updates the var_levels dictionary with the\n list of levels in the current semantic mappers. But it also allows the\n dictionary to persist, so it can be used to set levels by a key. 
This is\n used to track the list of col/row levels using an attached FacetGrid\n object, but it's kind of messy and ideally fixed by improving the\n faceting logic so it interfaces better with the modern approach to\n tracking plot variables.\n\n \"\"\"\n for var in self.variables:\n try:\n map_obj = getattr(self, f\"_{var}_map\")\n self._var_levels[var] = map_obj.levels\n except AttributeError:\n pass\n return self._var_levels\n\n def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way 
to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't be matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
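(An illustrative note added editorially, not in the\n # original source: pd.DataFrame({\"x\": pd.Series([1, 2], index=[0, 1]),\n # \"y\": pd.Series([3, 4], index=[1, 2])}) aligns on the union index\n # [0, 1, 2] and fills the unmatched positions with NaN.)\n #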
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
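For example (an illustrative\n addition, not in the original docstring), log_scale=2 would use\n base 2 rather than base 10.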
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that is specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
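(Editorial clarification, not in\n # the original comment: a \"file\" here is a full row or column of\n # facets, i.e. share_state is \"row\" or \"col\".)\n #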
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed for completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on the given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converters to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
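(An illustrative example, not in\n # the original source: values [10, 2, 1] sort numerically to [1, 2, 10],\n # so the string levels become [\"1\", \"2\", \"10\"] rather than the\n # lexicographic [\"1\", \"10\", \"2\"].)\n #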
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this ensures\n # artists will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 693, "name": "assign_variables", "kind": "def", "category": "function", "info": " def assign_variables(self, data=None, variables={}):\n \"\"\"Define plot variables, optionally using lookup from `data`.\"\"\"\n x = variables.get(\"x\", None)\n y = variables.get(\"y\", None)\n\n if x is None and y is None:\n self.input_format = \"wide\"\n plot_data, variables = self._assign_variables_wideform(\n data, **variables,\n )\n else:\n self.input_format = \"long\"\n plot_data, variables = self._assign_variables_longform(\n data, **variables,\n )\n\n self.plot_data = plot_data\n self.variables = variables\n self.var_types = {\n v: variable_type(\n plot_data[v],\n boolean_type=\"numeric\" if v in \"xy\" else \"categorical\"\n )\n for v in variables\n }\n\n return self\n\n def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 
1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # 
Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't be matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
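(An illustrative note added editorially, not in the\n # original source: pd.DataFrame({\"x\": pd.Series([1, 2], index=[0, 1]),\n # \"y\": pd.Series([3, 4], index=[1, 2])}) aligns on the union index\n # [0, 1, 2] and fills the unmatched positions with NaN.)\n #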
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
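For example (an illustrative\n addition, not in the original docstring), log_scale=2 would use\n base 2 rather than base 10.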
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
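# -- Illustrative sketch (editor's addition; not part of seaborn/_oldcore.py;
# the toy string values are assumptions) --
# The unit-conversion step behind the comp_data property quoted above, in
# isolation: a matplotlib Axis object acts as the converter, categorical
# (string) values are registered with update_units and mapped to numeric
# positions by convert_units, and np.log10 would apply only when the axis
# scale is "log".
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
converter = ax.xaxis                       # the Axis doubles as a converter
converter.update_units(["one", "two"])     # register categorical units
comp = np.asarray(converter.convert_units(["one", "two", "two"]), float)
if converter.get_scale() == "log":         # not the case here
    comp = np.log10(comp)
print(comp.tolist())                       # [0.0, 1.0, 1.0]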
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
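# -- Illustrative sketch (editor's addition; not part of seaborn/_oldcore.py;
# the helper name unpack_log_scale is hypothetical) --
# The log_scale unpacking rule used by _attach (quoted above), as a pure
# function: a pair is taken as (x, y) settings, while a scalar (True or a
# base number, either of which fails tuple unpacking) applies to whichever
# axis variables are defined.
def unpack_log_scale(log_scale, variables=("x", "y")):
    if log_scale is None:
        return False, False
    try:
        scalex, scaley = log_scale             # (x, y) pair
    except TypeError:                          # scalar: bool or base number
        scalex = log_scale if "x" in variables else False
        scaley = log_scale if "y" in variables else False
    return scalex, scaley

assert unpack_log_scale(None) == (False, False)
assert unpack_log_scale(2) == (2, 2)           # log base 2 on both axes
assert unpack_log_scale((True, 10)) == (True, 10)
assert unpack_log_scale(True, variables=("x",)) == (True, False)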
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
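# -- Illustrative sketch (editor's addition; not part of seaborn/_oldcore.py;
# the toy column is an assumption) --
# The sort-then-stringify step of scale_categorical, whose code continues
# just below: numeric values are sorted first (stable mergesort) so the
# derived level order is numeric, and then both the data and the order are
# coerced to strings, which is what matplotlib's fixed categorical scale
# actually draws.
import pandas as pd

plot_data = pd.DataFrame({"x": [10, 2, 2, 1]})
plot_data = plot_data.sort_values("x", kind="mergesort")
cat_data = plot_data["x"].dropna()
order = pd.Index(cat_data.unique(), name="x").astype(str)
plot_data["x"] = cat_data.astype(str)
print(list(order))  # ['1', '2', '10'], numeric order survives as strings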
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 700, "name": "_assign_variables_wideform", "kind": "ref", "category": "function", "info": " plot_data, variables = self._assign_variables_wideform(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 705, "name": "_assign_variables_longform", "kind": "ref", "category": "function", "info": " plot_data, variables = self._assign_variables_longform(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 712, "name": "variable_type", "kind": "ref", "category": "function", "info": " v: variable_type(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 721, "name": "_assign_variables_wideform", "kind": "def", "category": "function", "info": " def _assign_variables_wideform(self, data=None, **kwargs):\n \"\"\"Define plot variables given wide-form data.\n\n Parameters\n ----------\n data : flat vector or collection of vectors\n Data can be a vector or mapping that is coerceable to a Series\n or a sequence- or mapping-based collection of such vectors, or a\n rectangular numpy array, or a Pandas DataFrame.\n kwargs : variable -> data mappings\n Behavior with keyword arguments is currently undefined.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n \"\"\"\n # Raise if semantic or other variables are assigned in 
wide-form mode\n assigned = [k for k, v in kwargs.items() if v is not None]\n if any(assigned):\n s = \"s\" if len(assigned) > 1 else \"\"\n err = f\"The following variable{s} cannot be assigned with wide-form data: \"\n err += \", \".join(f\"`{v}`\" for v in assigned)\n raise ValueError(err)\n\n # Determine if the data object actually has any data in it\n empty = data is None or not len(data)\n\n # Then, determine if we have \"flat\" data (a single vector)\n if isinstance(data, dict):\n values = data.values()\n else:\n values = np.atleast_1d(np.asarray(data, dtype=object))\n flat = not any(\n isinstance(v, Iterable) and not isinstance(v, (str, bytes))\n for v in values\n )\n\n if empty:\n\n # Make an object with the structure of plot_data, but empty\n plot_data = pd.DataFrame()\n variables = {}\n\n elif flat:\n\n # Handle flat data by converting to pandas Series and using the\n # index and/or values to define x and/or y\n # (Could be accomplished with a more general to_series() interface)\n flat_data = pd.Series(data).copy()\n names = {\n \"@values\": flat_data.name,\n \"@index\": flat_data.index.name\n }\n\n plot_data = {}\n variables = {}\n\n for var in [\"x\", \"y\"]:\n if var in self.flat_structure:\n attr = self.flat_structure[var]\n plot_data[var] = getattr(flat_data, attr[1:])\n variables[var] = names[self.flat_structure[var]]\n\n plot_data = pd.DataFrame(plot_data)\n\n else:\n\n # Otherwise assume we have some collection of vectors.\n\n # Handle Python sequences such that entries end up in the columns,\n # not in the rows, of the intermediate wide DataFrame.\n # One way to accomplish this is to convert to a dict of Series.\n if isinstance(data, Sequence):\n data_dict = {}\n for i, var in enumerate(data):\n key = getattr(var, \"name\", i)\n # TODO is there a safer/more generic way to ensure Series?\n # sort of like np.asarray, but for pandas?\n data_dict[key] = pd.Series(var)\n\n data = data_dict\n\n # Pandas requires that dict values either be Series objects\n # or all have the same length, but we want to allow \"ragged\" inputs\n if isinstance(data, Mapping):\n data = {key: pd.Series(val) for key, val in data.items()}\n\n # Otherwise, delegate to the pandas DataFrame constructor\n # This is where we'd prefer to use a general interface that says\n # \"give me this data as a pandas DataFrame\", so we can accept\n # DataFrame objects from other libraries\n wide_data = pd.DataFrame(data, copy=True)\n\n # At this point we should reduce the dataframe to numeric cols\n numeric_cols = [\n k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n ]\n wide_data = wide_data[numeric_cols]\n\n # Now melt the data to long form\n melt_kws = {\"var_name\": \"@columns\", \"value_name\": \"@values\"}\n use_index = \"@index\" in self.wide_structure.values()\n if use_index:\n melt_kws[\"id_vars\"] = \"@index\"\n try:\n orig_categories = wide_data.columns.categories\n orig_ordered = wide_data.columns.ordered\n wide_data.columns = wide_data.columns.add_categories(\"@index\")\n except AttributeError:\n category_columns = False\n else:\n category_columns = True\n wide_data[\"@index\"] = wide_data.index.to_series()\n\n plot_data = wide_data.melt(**melt_kws)\n\n if use_index and category_columns:\n plot_data[\"@columns\"] = pd.Categorical(plot_data[\"@columns\"],\n orig_categories,\n orig_ordered)\n\n # Assign names corresponding to plot semantics\n for var, attr in self.wide_structure.items():\n plot_data[var] = plot_data[attr]\n\n # Define the variable names\n variables = {}\n for var, attr 
in self.wide_structure.items():\n obj = getattr(wide_data, attr[1:])\n variables[var] = getattr(obj, \"name\", None)\n\n # Remove redundant columns from plot_data\n plot_data = plot_data[list(variables)]\n\n return plot_data, variables\n\n def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
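# -- Illustrative sketch (editor's addition; not part of seaborn/_oldcore.py;
# the toy dict is an assumption) --
# The wide-to-long reshaping performed by _assign_variables_wideform
# (quoted above), in isolation: ragged inputs become a dict of Series, the
# index is appended as an "@index" column, and melt() yields the
# "@columns"/"@values" columns that wide_structure then maps onto plot
# semantics.
import pandas as pd

data = {"a": [1, 2, 3], "b": [4, 5]}               # ragged wide-form input
wide = pd.DataFrame({k: pd.Series(v) for k, v in data.items()})
wide["@index"] = wide.index.to_series()
long_df = wide.melt(id_vars="@index", var_name="@columns", value_name="@values")
# e.g. wide_structure = {"x": "@index", "y": "@values", "hue": "@columns"}
print(long_df)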
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 822, "name": "variable_type", "kind": "ref", "category": "function", "info": " k for k, v in wide_data.items() if variable_type(v) == \"numeric\"\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 834, "name": "add_categories", "kind": "ref", "category": "function", "info": " wide_data.columns = wide_data.columns.add_categories(\"@index\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 839, "name": "to_series", "kind": "ref", "category": "function", "info": " wide_data[\"@index\"] = wide_data.index.to_series()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 863, "name": "_assign_variables_longform", "kind": "def", "category": "function", "info": " def _assign_variables_longform(self, data=None, **kwargs):\n \"\"\"Define plot variables given long-form data and/or vector inputs.\n\n Parameters\n ----------\n data : dict-like collection of vectors\n Input data where variable names map to vector values.\n kwargs : variable -> data mappings\n Keys are seaborn variables (x, y, hue, ...) 
and values are vectors\n in any format that can construct a :class:`pandas.DataFrame` or\n names of columns or index levels in ``data``.\n\n Returns\n -------\n plot_data : :class:`pandas.DataFrame`\n Long-form data object mapping seaborn variables (x, y, hue, ...)\n to data vectors.\n variables : dict\n Keys are defined seaborn variables; values are names inferred from\n the inputs (or None when no name can be determined).\n\n Raises\n ------\n ValueError\n When variables are strings that don't appear in ``data``.\n\n \"\"\"\n plot_data = {}\n variables = {}\n\n # Data is optional; all variables can be defined as vectors\n if data is None:\n data = {}\n\n # TODO should we try a data.to_dict() or similar here to more\n # generally accept objects with that interface?\n # Note that dict(df) also works for pandas, and gives us what we\n # want, whereas DataFrame.to_dict() gives a nested dict instead of\n # a dict of series.\n\n # Variables can also be extracted from the index attribute\n # TODO is this the most general way to enable it?\n # There is no index.to_dict on multiindex, unfortunately\n try:\n index = data.index.to_frame()\n except AttributeError:\n index = {}\n\n # The caller will determine the order of variables in plot_data\n for key, val in kwargs.items():\n\n # First try to treat the argument as a key for the data collection.\n # But be flexible about what can be used as a key.\n # Usually it will be a string, but allow numbers or tuples too when\n # taking from the main data object. Only allow strings to reference\n # fields in the index, because otherwise there is too much ambiguity.\n try:\n val_as_data_key = (\n val in data\n or (isinstance(val, (str, bytes)) and val in index)\n )\n except (KeyError, TypeError):\n val_as_data_key = False\n\n if val_as_data_key:\n\n # We know that __getitem__ will work\n\n if val in data:\n plot_data[key] = data[val]\n elif val in index:\n plot_data[key] = index[val]\n variables[key] = val\n\n elif isinstance(val, (str, bytes)):\n\n # This looks like a column name but we don't know what it means!\n\n err = f\"Could not interpret value `{val}` for parameter `{key}`\"\n raise ValueError(err)\n\n else:\n\n # Otherwise, assume the value is itself data\n\n # Raise when data object is present and a vector can't matched\n if isinstance(data, pd.DataFrame) and not isinstance(val, pd.Series):\n if np.ndim(val) and len(data) != len(val):\n val_cls = val.__class__.__name__\n err = (\n f\"Length of {val_cls} vectors must match length of `data`\"\n f\" when both are used, but `data` has length {len(data)}\"\n f\" and the vector passed to `{key}` has length {len(val)}.\"\n )\n raise ValueError(err)\n\n plot_data[key] = val\n\n # Try to infer the name of the variable\n variables[key] = getattr(val, \"name\", None)\n\n # Construct a tidy plot DataFrame. 
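# -- Illustrative sketch (editor's addition; not part of seaborn/_oldcore.py;
# the toy frame and the helper name resolve are assumptions) --
# The key-versus-vector resolution at the heart of _assign_variables_longform
# (quoted above): a value that indexes into `data` (or, for strings, into the
# index levels) is a reference, any other string is an error, and everything
# else is assumed to be the data itself.
import pandas as pd

df = pd.DataFrame({"total": [10, 20]}, index=pd.Index(["a", "b"], name="day"))
index = df.index.to_frame()

def resolve(val, data=df):
    try:
        is_key = val in data or (isinstance(val, str) and val in index)
    except TypeError:                           # unhashable, e.g. a list
        is_key = False
    if is_key:
        return (data[val] if val in data else index[val]), val
    if isinstance(val, str):
        raise ValueError(f"Could not interpret value `{val}`")
    return val, getattr(val, "name", None)      # the value is itself data

print(resolve("total")[1], resolve("day")[1], resolve([1, 2])[1])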
This will convert a number of\n # types automatically, aligning on index in case of pandas objects\n plot_data = pd.DataFrame(plot_data)\n\n # Reduce the variables dictionary to fields with valid data\n variables = {\n var: name\n for var, name in variables.items()\n if plot_data[var].notnull().any()\n }\n\n return plot_data, variables\n\n def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists 
that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 907, "name": "to_frame", "kind": "ref", "category": "function", "info": " index = data.index.to_frame()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 977, "name": "iter_data", "kind": "def", "category": "function", "info": " def iter_data(\n self, grouping_vars=None, *,\n reverse=False, from_comp_data=False,\n by_facet=True, allow_empty=False, dropna=True,\n ):\n \"\"\"Generator for getting subsets of data defined by semantic variables.\n\n Also injects \"col\" and \"row\" into grouping semantics.\n\n Parameters\n ----------\n grouping_vars : string or list of strings\n Semantic variables that define the subsets of data.\n reverse : bool\n If True, reverse the order of iteration.\n from_comp_data : bool\n If True, use self.comp_data rather than self.plot_data\n by_facet : bool\n If True, add faceting variables to the set of grouping variables.\n allow_empty : bool\n If True, yield an empty dataframe when no observations exist for\n combinations of grouping variables.\n dropna : bool\n If True, remove rows with missing data.\n\n Yields\n ------\n sub_vars : dict\n Keys are semantic names, values are the level of that semantic.\n sub_data : :class:`pandas.DataFrame`\n Subset of ``plot_data`` for this combination of semantic values.\n\n \"\"\"\n # TODO should this default to using all (non x/y?) 
semantics?\n # or define grouping vars somewhere?\n if grouping_vars is None:\n grouping_vars = []\n elif isinstance(grouping_vars, str):\n grouping_vars = [grouping_vars]\n elif isinstance(grouping_vars, tuple):\n grouping_vars = list(grouping_vars)\n\n # Always insert faceting variables\n if by_facet:\n facet_vars = {\"col\", \"row\"}\n grouping_vars.extend(\n facet_vars & set(self.variables) - set(grouping_vars)\n )\n\n # Reduce to the semantics used in this plot\n grouping_vars = [\n var for var in grouping_vars if var in self.variables\n ]\n\n if from_comp_data:\n data = self.comp_data\n else:\n data = self.plot_data\n\n if dropna:\n data = data.dropna()\n\n levels = self.var_levels.copy()\n if from_comp_data:\n for axis in {\"x\", \"y\"} & set(grouping_vars):\n if self.var_types[axis] == \"categorical\":\n if self._var_ordered[axis]:\n # If the axis is ordered, then the axes in a possible\n # facet grid are by definition \"shared\", or there is a\n # single axis with a unique cat -> idx mapping.\n # So we can just take the first converter object.\n converter = self.converters[axis].iloc[0]\n levels[axis] = converter.convert_units(levels[axis])\n else:\n # Otherwise, the mappings may not be unique, but we can\n # use the unique set of index values in comp_data.\n levels[axis] = np.sort(data[axis].unique())\n elif self.var_types[axis] == \"datetime\":\n levels[axis] = mpl.dates.date2num(levels[axis])\n elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n levels[axis] = np.log10(levels[axis])\n\n if grouping_vars:\n\n grouped_data = data.groupby(\n grouping_vars, sort=False, as_index=False\n )\n\n grouping_keys = []\n for var in grouping_vars:\n grouping_keys.append(levels.get(var, []))\n\n iter_keys = itertools.product(*grouping_keys)\n if reverse:\n iter_keys = reversed(list(iter_keys))\n\n for key in iter_keys:\n\n # Pandas fails with singleton tuple inputs\n pd_key = key[0] if len(key) == 1 else key\n\n try:\n data_subset = grouped_data.get_group(pd_key)\n except KeyError:\n # XXX we are adding this to allow backwards compatibility\n # with the empty artists that old categorical plots would\n # add (before 0.12), which we may decide to break, in which\n # case this option could be removed\n data_subset = data.loc[[]]\n\n if data_subset.empty and not allow_empty:\n continue\n\n sub_vars = dict(zip(grouping_vars, key))\n\n yield sub_vars, data_subset.copy()\n\n else:\n\n yield {}, data.copy()\n\n @property\n def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = 
pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1037, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1049, "name": "convert_units", "kind": "ref", "category": "function", "info": " levels[axis] = converter.convert_units(levels[axis])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1055, "name": "date2num", "kind": "ref", "category": "function", "info": " levels[axis] = mpl.dates.date2num(levels[axis])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1056, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " elif self.var_types[axis] == \"numeric\" and self._log_scaled(axis):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1079, "name": "get_group", "kind": "ref", "category": "function", "info": " data_subset = grouped_data.get_group(pd_key)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1099, "name": "comp_data", "kind": "def", "category": "function", "info": " def comp_data(self):\n \"\"\"Dataframe with numeric x and y, after unit conversion and log scaling.\"\"\"\n if not hasattr(self, \"ax\"):\n # Probably a good idea, but will need a bunch of tests updated\n # Most of these tests should just use the external interface\n # Then this can be re-enabled.\n # raise AttributeError(\"No Axes attached to plotter\")\n return self.plot_data\n\n if not hasattr(self, \"_comp_data\"):\n\n comp_data = (\n self.plot_data\n .copy(deep=False)\n .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n )\n\n for var in \"yx\":\n if var not in self.variables:\n continue\n\n parts = []\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, orig in grouped:\n with pd.option_context('mode.use_inf_as_na', True):\n orig = orig.dropna()\n if var in self.var_levels:\n # TODO this should happen in some centralized location\n # it is similar to GH2419, but more complicated because\n # supporting `order` in categorical plots is tricky\n orig = orig[orig.isin(self.var_levels[var])]\n comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n if converter.get_scale() == \"log\":\n comp = np.log10(comp)\n parts.append(pd.Series(comp, orig.index, name=orig.name))\n if parts:\n comp_col = pd.concat(parts)\n else:\n 
comp_col = pd.Series(dtype=float, name=var)\n comp_data.insert(0, var, comp_col)\n\n self._comp_data = comp_data\n\n return self._comp_data\n\n def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. 
In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. 
Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1113, "name": "drop", "kind": "ref", "category": "function", "info": " .drop([\"x\", \"y\"], axis=1, errors=\"ignore\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1124, "name": "dropna", "kind": "ref", "category": "function", "info": " orig = orig.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1130, "name": "convert_units", "kind": "ref", "category": "function", "info": " comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1130, "name": "astype", "kind": "ref", "category": "function", "info": " comp = pd.to_numeric(converter.convert_units(orig)).astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1131, "name": "get_scale", "kind": "ref", "category": "function", "info": " if converter.get_scale() == \"log\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1144, "name": "_get_axes", "kind": "def", "category": "function", "info": " def _get_axes(self, sub_vars):\n \"\"\"Return an Axes object based on existence of row/col variables.\"\"\"\n row = sub_vars.get(\"row\", None)\n col = sub_vars.get(\"col\", None)\n if row is not None and col is not None:\n return self.facets.axes_dict[(row, col)]\n elif row is not None:\n return self.facets.axes_dict[row]\n elif col is not None:\n return self.facets.axes_dict[col]\n elif self.ax is None:\n return self.facets.ax\n else:\n return self.ax\n\n def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. 
If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. 
In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. 
Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1159, "name": "_attach", "kind": "def", "category": "function", "info": " def _attach(\n self,\n obj,\n allowed_types=None,\n log_scale=None,\n ):\n \"\"\"Associate the plotter with an Axes manager and initialize its units.\n\n Parameters\n ----------\n obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`\n Structural object that we will eventually plot onto.\n allowed_types : str or list of str\n If provided, raise when either the x or y variable does not have\n one of the declared seaborn types.\n log_scale : bool, number, or pair of bools or numbers\n If not False, set the axes to use log scaling, with the given\n base or defaulting to 10. If a tuple, interpreted as separate\n arguments for the x and y axes.\n\n \"\"\"\n from .axisgrid import FacetGrid\n if isinstance(obj, FacetGrid):\n self.ax = None\n self.facets = obj\n ax_list = obj.axes.flatten()\n if obj.col_names is not None:\n self.var_levels[\"col\"] = obj.col_names\n if obj.row_names is not None:\n self.var_levels[\"row\"] = obj.row_names\n else:\n self.ax = obj\n self.facets = None\n ax_list = [obj]\n\n # Identify which \"axis\" variables we have defined\n axis_variables = set(\"xy\").intersection(self.variables)\n\n # -- Verify the types of our x and y variables here.\n # This doesn't really make complete sense being here here, but it's a fine\n # place for it, given the current system.\n # (Note that for some plots, there might be more complicated restrictions)\n # e.g. 
the categorical plots have their own check that as specific to the\n # non-categorical axis.\n if allowed_types is None:\n allowed_types = [\"numeric\", \"datetime\", \"categorical\"]\n elif isinstance(allowed_types, str):\n allowed_types = [allowed_types]\n\n for var in axis_variables:\n var_type = self.var_types[var]\n if var_type not in allowed_types:\n err = (\n f\"The {var} variable is {var_type}, but one of \"\n f\"{allowed_types} is required\"\n )\n raise TypeError(err)\n\n # -- Get axis objects for each row in plot_data for type conversions and scaling\n\n facet_dim = {\"x\": \"col\", \"y\": \"row\"}\n\n self.converters = {}\n for var in axis_variables:\n other_var = {\"x\": \"y\", \"y\": \"x\"}[var]\n\n converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)\n share_state = getattr(self.facets, f\"_share{var}\", True)\n\n # Simplest cases are that we have a single axes, all axes are shared,\n # or sharing is only on the orthogonal facet dimension. In these cases,\n # all datapoints get converted the same way, so use the first axis\n if share_state is True or share_state == facet_dim[other_var]:\n converter.loc[:] = getattr(ax_list[0], f\"{var}axis\")\n\n else:\n\n # Next simplest case is when no axes are shared, and we can\n # use the axis objects within each facet\n if share_state is False:\n for axes_vars, axes_data in self.iter_data():\n ax = self._get_axes(axes_vars)\n converter.loc[axes_data.index] = getattr(ax, f\"{var}axis\")\n\n # In the more complicated case, the axes are shared within each\n # \"file\" of the facetgrid. In that case, we need to subset the data\n # for that file and assign it the first axis in the slice of the grid\n else:\n\n names = getattr(self.facets, f\"{share_state}_names\")\n for i, level in enumerate(names):\n idx = (i, 0) if share_state == \"row\" else (0, i)\n axis = getattr(self.facets.axes[idx], f\"{var}axis\")\n converter.loc[self.plot_data[share_state] == level] = axis\n\n # Store the converter vector, which we use elsewhere (e.g comp_data)\n self.converters[var] = converter\n\n # Now actually update the matplotlib objects to do the conversion we want\n grouped = self.plot_data[var].groupby(self.converters[var], sort=False)\n for converter, seed_data in grouped:\n if self.var_types[var] == \"categorical\":\n if self._var_ordered[var]:\n order = self.var_levels[var]\n else:\n order = None\n seed_data = categorical_order(seed_data, order)\n converter.update_units(seed_data)\n\n # -- Set numerical axis scales\n\n # First unpack the log_scale argument\n if log_scale is None:\n scalex = scaley = False\n else:\n # Allow single value or x, y tuple\n try:\n scalex, scaley = log_scale\n except TypeError:\n scalex = log_scale if \"x\" in self.variables else False\n scaley = log_scale if \"y\" in self.variables else False\n\n # Now use it\n for axis, scale in zip(\"xy\", (scalex, scaley)):\n if scale:\n for ax in ax_list:\n set_scale = getattr(ax, f\"set_{axis}scale\")\n if scale is True:\n set_scale(\"log\")\n else:\n set_scale(\"log\", base=scale)\n\n # For categorical y, we want the \"first\" level to be at the top of the axis\n if self.var_types.get(\"y\", None) == \"categorical\":\n for ax in ax_list:\n try:\n ax.yaxis.set_inverted(True)\n except AttributeError: # mpl < 3.1\n if not ax.yaxis_inverted():\n ax.invert_yaxis()\n\n # TODO -- Add axes labels\n\n def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not hasattr(self, \"ax\"):\n return False\n\n if self.ax is 
None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1184, "name": "flatten", "kind": "ref", "category": "function", "info": " ax_list = obj.axes.flatten()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1239, "name": "iter_data", "kind": "ref", "category": "function", "info": " for axes_vars, axes_data in self.iter_data():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1240, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(axes_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1265, "name": "categorical_order", "kind": "ref", "category": "function", "info": " seed_data = categorical_order(seed_data, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1266, "name": "update_units", "kind": "ref", "category": "function", "info": " converter.update_units(seed_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1287, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1289, "name": "set_scale", "kind": "ref", "category": "function", "info": " set_scale(\"log\", base=scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1295, "name": "set_inverted", "kind": "ref", "category": "function", "info": " ax.yaxis.set_inverted(True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1297, "name": "yaxis_inverted", "kind": "ref", "category": "function", "info": " if not ax.yaxis_inverted():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1298, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1302, "name": "_log_scaled", "kind": "def", "category": "function", "info": " def _log_scaled(self, axis):\n \"\"\"Return True if specified axis is log scaled on all attached axes.\"\"\"\n if not 
hasattr(self, \"ax\"):\n return False\n\n if self.ax is None:\n axes_list = self.facets.axes.flatten()\n else:\n axes_list = [self.ax]\n\n log_scaled = []\n for ax in axes_list:\n data_axis = getattr(ax, f\"{axis}axis\")\n log_scaled.append(data_axis.get_scale() == \"log\")\n\n if any(log_scaled) and not all(log_scaled):\n raise RuntimeError(\"Axis scaling is not consistent\")\n\n return any(log_scaled)\n\n def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1308, "name": "flatten", "kind": "ref", "category": "function", "info": " axes_list = self.facets.axes.flatten()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1315, "name": "get_scale", "kind": "ref", "category": "function", "info": " log_scaled.append(data_axis.get_scale() == \"log\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1322, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self, ax, default_x=\"\", default_y=\"\"):\n \"\"\"Add axis labels if not present, set visibility to match ticklabels.\"\"\"\n # TODO ax could default to None and use attached axes if present\n # but what to do about the case of facets? Currently using FacetGrid's\n # set_axis_labels method, which doesn't add labels to the interior even\n # when the axes are not shared. Maybe that makes sense?\n if not ax.get_xlabel():\n x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n if not ax.get_ylabel():\n y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n\n # XXX If the scale_* methods are going to modify the plot_data structure, they\n # can't be called twice. That means that if they are called twice, they should\n # raise. Alternatively, we could store an original version of plot_data and each\n # time they are called they operate on the store, not the current state.\n\n def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? 
Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1328, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " if not ax.get_xlabel():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1329, "name": "get_visible", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1329, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " x_visible = any(t.get_visible() for t in ax.get_xticklabels())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1330, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.variables.get(\"x\", default_x), visible=x_visible)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1331, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " if not ax.get_ylabel():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1332, "name": "get_visible", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() for t in ax.get_yticklabels())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1332, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " y_visible = any(t.get_visible() 
for t in ax.get_yticklabels())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1333, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.variables.get(\"y\", default_y), visible=y_visible)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1340, "name": "scale_native", "kind": "def", "category": "function", "info": " def scale_native(self, axis, *args, **kwargs):\n\n # Default, defer to matplotlib\n\n raise NotImplementedError\n\n def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1346, "name": "scale_numeric", "kind": "def", "category": "function", "info": " def scale_numeric(self, axis, *args, **kwargs):\n\n # Feels needed to completeness, what should it do?\n # Perhaps handle log scaling? Set the ticker/formatter/limits?\n\n raise NotImplementedError\n\n def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. 
https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1353, "name": "scale_datetime", "kind": "def", "category": "function", "info": " def scale_datetime(self, axis, *args, **kwargs):\n\n # Use pd.to_datetime to convert strings or numbers to datetime objects\n # Note, use day-resolution for numeric->datetime to match matplotlib\n\n raise NotImplementedError\n\n def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. 
The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1360, "name": "scale_categorical", "kind": "def", "category": "function", "info": " def scale_categorical(self, axis, order=None, formatter=None):\n \"\"\"\n Enforce categorical (fixed-scale) rules for the data on given axis.\n\n Parameters\n ----------\n axis : \"x\" or \"y\"\n Axis of the plot to operate on.\n order : list\n Order that unique values should appear in.\n formatter : callable\n Function mapping values to a string representation.\n\n Returns\n -------\n self\n\n \"\"\"\n # This method both modifies the internal representation of the data\n # (converting it to string) and sets some attributes on self. It might be\n # a good idea to have a separate object attached to self that contains the\n # information in those attributes (i.e. whether to enforce variable order\n # across facets, the order to use) similar to the SemanticMapping objects\n # we have for semantic variables. That object could also hold the converter\n # objects that get used, if we can decouple those from an existing axis\n # (cf. https://github.com/matplotlib/matplotlib/issues/19229).\n # There are some interactions with faceting information that would need\n # to be thought through, since the converts to use depend on facets.\n # If we go that route, these methods could become \"borrowed\" methods similar\n # to what happens with the alternate semantic mapper constructors, although\n # that approach is kind of fussy and confusing.\n\n # TODO this method could also set the grid state? Since we like to have no\n # grid on the categorical axis by default. 
Again, a case where we'll need to\n # store information until we use it, so best to have a way to collect the\n # attributes that this method sets.\n\n # TODO if we are going to set visual properties of the axes with these methods,\n # then we could do the steps currently in CategoricalPlotter._adjust_cat_axis\n\n # TODO another, and distinct idea, is to expose a cut= param here\n\n _check_argument(\"axis\", [\"x\", \"y\"], axis)\n\n # Categorical plots can be \"univariate\" in which case they get an anonymous\n # category label on the opposite axis.\n if axis not in self.variables:\n self.variables[axis] = None\n self.var_types[axis] = \"categorical\"\n self.plot_data[axis] = \"\"\n\n # If the \"categorical\" variable has a numeric type, sort the rows so that\n # the default result from categorical_order has those values sorted after\n # they have been coerced to strings. The reason for this is so that later\n # we can get facet-wise orders that are correct.\n # XXX Should this also sort datetimes?\n # It feels more consistent, but technically will be a default change\n # If so, should also change categorical_order to behave that way\n if self.var_types[axis] == \"numeric\":\n self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n\n # Now get a reference to the categorical data vector and remove na values\n cat_data = self.plot_data[axis].dropna()\n\n # Get the initial categorical order, which we do before string\n # conversion to respect the original types of the order list.\n # Track whether the order is given explicitly so that we can know\n # whether or not to use the order constructed here downstream\n self._var_ordered[axis] = order is not None or cat_data.dtype.name == \"category\"\n order = pd.Index(categorical_order(cat_data, order), name=axis)\n\n # Then convert data to strings. 
This is because in matplotlib,\n # \"categorical\" data really mean \"string\" data, so doing this artists\n # will be drawn on the categorical axis with a fixed scale.\n # TODO implement formatter here; check that it returns strings?\n if formatter is not None:\n cat_data = cat_data.map(formatter)\n order = order.map(formatter)\n else:\n cat_data = cat_data.astype(str)\n order = order.astype(str)\n\n # Update the levels list with the type-converted order variable\n self.var_levels[axis] = order\n\n # Now ensure that seaborn will use categorical rules internally\n self.var_types[axis] = \"categorical\"\n\n # Put the string-typed categorical vector back into the plot_data structure\n self.plot_data[axis] = cat_data\n\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1402, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"axis\", [\"x\", \"y\"], axis)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1419, "name": "sort_values", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.sort_values(axis, kind=\"mergesort\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1422, "name": "dropna", "kind": "ref", "category": "function", "info": " cat_data = self.plot_data[axis].dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1429, "name": "categorical_order", "kind": "ref", "category": "function", "info": " order = pd.Index(categorical_order(cat_data, order), name=axis)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1439, "name": "astype", "kind": "ref", "category": "function", "info": " cat_data = cat_data.astype(str)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1440, "name": "astype", "kind": "ref", "category": "function", "info": " order = order.astype(str)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1454, "name": "VariableType", "kind": "def", "category": "class", "info": "__init__\t__eq__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1474, "name": "variable_type", "kind": "def", "category": "function", "info": "def variable_type(vector, boolean_type=\"numeric\"):\n \"\"\"\n Determine whether a vector contains numeric, categorical, or datetime data.\n\n This function differs from the pandas typing API in two ways:\n\n - Python sequences or object-typed PyData objects are considered numeric if\n all of their entries are numeric.\n - String or mixed-type data are considered categorical even if not\n explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.\n\n Parameters\n ----------\n vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence\n Input data to test.\n boolean_type : 'numeric' or 'categorical'\n Type to use for vectors containing only 0s and 1s (and NAs).\n\n Returns\n -------\n var_type : 'numeric', 'categorical', or 'datetime'\n Name identifying 
the type of data in the vector.\n \"\"\"\n vector = pd.Series(vector)\n\n # If a categorical dtype is set, infer categorical\n if isinstance(vector.dtype, pd.CategoricalDtype):\n return VariableType(\"categorical\")\n\n # Special-case all-na data, which is always \"numeric\"\n if pd.isna(vector).all():\n return VariableType(\"numeric\")\n\n # At this point, drop nans to simplify further type inference\n vector = vector.dropna()\n\n # Special-case binary/boolean data, allow caller to determine\n # This triggers a numpy warning when vector has strings/objects\n # https://github.com/numpy/numpy/issues/6784\n # Because we reduce with .all(), we are agnostic about whether the\n # comparison returns a scalar or vector, so we will ignore the warning.\n # It triggers a separate DeprecationWarning when the vector has datetimes:\n # https://github.com/numpy/numpy/issues/13548\n # This is considered a bug by numpy and will likely go away.\n with warnings.catch_warnings():\n warnings.simplefilter(\n action='ignore', category=(FutureWarning, DeprecationWarning)\n )\n if np.isin(vector, [0, 1]).all():\n return VariableType(boolean_type)\n\n # Defer to positive pandas tests\n if pd.api.types.is_numeric_dtype(vector):\n return VariableType(\"numeric\")\n\n if pd.api.types.is_datetime64_dtype(vector):\n return VariableType(\"datetime\")\n\n # --- If we get to here, we need to check the entries\n\n # Check for a collection where everything is a number\n\n def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1501, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1505, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1508, "name": "dropna", "kind": "ref", "category": "function", "info": " vector = vector.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1523, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(boolean_type)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1526, "name": "is_numeric_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_numeric_dtype(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1527, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1529, "name": "is_datetime64_dtype", "kind": "ref", "category": "function", "info": " if pd.api.types.is_datetime64_dtype(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1530, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1536, "name": "all_numeric", "kind": "def", "category": "function", "info": " def all_numeric(x):\n for x_i in x:\n if not isinstance(x_i, Number):\n return False\n return True\n\n if all_numeric(vector):\n return VariableType(\"numeric\")\n\n # Check for a collection where everything is a datetime\n\n def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1542, "name": "all_numeric", "kind": "ref", "category": "function", "info": " if all_numeric(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1543, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"numeric\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1547, "name": "all_datetime", "kind": "def", "category": "function", "info": " def all_datetime(x):\n for x_i in x:\n if not isinstance(x_i, (datetime, np.datetime64)):\n return False\n return True\n\n if all_datetime(vector):\n return VariableType(\"datetime\")\n\n # Otherwise, our final fallback is to consider things categorical\n\n return VariableType(\"categorical\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1553, "name": "all_datetime", "kind": "ref", "category": "function", "info": " if all_datetime(vector):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1554, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"datetime\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1558, "name": "VariableType", "kind": "ref", "category": "function", "info": " return VariableType(\"categorical\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1561, "name": "infer_orient", "kind": "def", "category": "function", "info": "def infer_orient(x=None, y=None, orient=None, require_numeric=True):\n \"\"\"Determine how the plot should be oriented based on the data.\n\n For historical reasons, the convention is to call a plot \"horizontally\"\n or \"vertically\" oriented based on the axis representing its dependent\n variable. 
Practically, this is used when determining the axis for\n numerical aggregation.\n\n Parameters\n ----------\n x, y : Vector data or None\n Positional data vectors for the plot.\n orient : string or None\n Specified orientation. If not None, can be \"x\" or \"y\", or otherwise\n must start with \"v\" or \"h\".\n require_numeric : bool\n If set, raise when the implied dependent variable is not numeric.\n\n Returns\n -------\n orient : \"x\" or \"y\"\n\n Raises\n ------\n ValueError: When `orient` is an unknown string.\n TypeError: When dependent variable is not numeric, with `require_numeric`\n\n \"\"\"\n\n x_type = None if x is None else variable_type(x)\n y_type = None if y is None else variable_type(y)\n\n nonnumeric_dv_error = \"{} orientation requires numeric `{}` variable.\"\n single_var_warning = \"{} orientation ignored with only `{}` specified.\"\n\n if x is None:\n if str(orient).startswith(\"h\"):\n warnings.warn(single_var_warning.format(\"Horizontal\", \"y\"))\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"x\"\n\n elif y is None:\n if str(orient).startswith(\"v\"):\n warnings.warn(single_var_warning.format(\"Vertical\", \"x\"))\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"y\"\n\n elif str(orient).startswith(\"v\") or orient == \"x\":\n if require_numeric and y_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Vertical\", \"y\"))\n return \"x\"\n\n elif str(orient).startswith(\"h\") or orient == \"y\":\n if require_numeric and x_type != \"numeric\":\n raise TypeError(nonnumeric_dv_error.format(\"Horizontal\", \"x\"))\n return \"y\"\n\n elif orient is not None:\n err = (\n \"`orient` must start with 'v' or 'h' or be None, \"\n f\"but `{repr(orient)}` was passed.\"\n )\n raise ValueError(err)\n\n elif x_type != \"categorical\" and y_type == \"categorical\":\n return \"y\"\n\n elif x_type != \"numeric\" and y_type == \"numeric\":\n return \"x\"\n\n elif x_type == \"numeric\" and y_type != \"numeric\":\n return \"y\"\n\n elif require_numeric and \"numeric\" not in (x_type, y_type):\n err = \"Neither the `x` nor `y` variable appears to be numeric.\"\n raise TypeError(err)\n\n else:\n return \"x\"\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1590, "name": "variable_type", "kind": "ref", "category": "function", "info": " x_type = None if x is None else variable_type(x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1591, "name": "variable_type", "kind": "ref", "category": "function", "info": " y_type = None if y is None else variable_type(y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1644, "name": "unique_dashes", "kind": "def", "category": "function", "info": "def unique_dashes(n):\n \"\"\"Build an arbitrarily long list of unique dash styles for lines.\n\n Parameters\n ----------\n n : int\n Number of unique dash specs to generate.\n\n Returns\n -------\n dashes : list of strings or tuples\n Valid arguments for the ``dashes`` parameter on\n :class:`matplotlib.lines.Line2D`. 
The first spec is a solid\n line (``\"\"``), the remainder are sequences of long and short\n dashes.\n\n \"\"\"\n # Start with dash specs that are well distinguishable\n dashes = [\n \"\",\n (4, 1.5),\n (1, 1),\n (3, 1.25, 1.5, 1.25),\n (5, 1, 1, 1),\n ]\n\n # Now programmatically build as many as we need\n p = 3\n while len(dashes) < n:\n\n # Take combinations of long and short dashes\n a = itertools.combinations_with_replacement([3, 1.25], p)\n b = itertools.combinations_with_replacement([4, 1], p)\n\n # Interleave the combinations, reversing one of the streams\n segment_list = itertools.chain(*zip(\n list(a)[1:-1][::-1],\n list(b)[1:-1]\n ))\n\n # Now insert the gaps\n for segments in segment_list:\n gap = min(segments)\n spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))\n dashes.append(spec)\n\n p += 1\n\n return dashes[:n]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1695, "name": "unique_markers", "kind": "def", "category": "function", "info": "def unique_markers(n):\n \"\"\"Build an arbitrarily long list of unique marker styles for points.\n\n Parameters\n ----------\n n : int\n Number of unique marker specs to generate.\n\n Returns\n -------\n markers : list of string or tuples\n Values for defining :class:`matplotlib.markers.MarkerStyle` objects.\n All markers will be filled.\n\n \"\"\"\n # Start with marker specs that are well distinguishable\n markers = [\n \"o\",\n \"X\",\n (4, 0, 45),\n \"P\",\n (4, 0, 0),\n (4, 1, 0),\n \"^\",\n (4, 1, 45),\n \"v\",\n ]\n\n # Now generate more from regular polygons of increasing order\n s = 5\n while len(markers) < n:\n a = 360 / (s + 1) / 2\n markers.extend([\n (s + 1, 1, a),\n (s + 1, 0, a),\n (s, 1, 0),\n (s, 0, 0),\n ])\n s += 1\n\n # Convert to MarkerStyle object, using only exactly what we need\n # markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]\n\n return markers[:n]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1741, "name": "categorical_order", "kind": "def", "category": "function", "info": "def categorical_order(vector, order=None):\n \"\"\"Return a list of unique data values.\n\n Determine an ordered list of levels in ``values``.\n\n Parameters\n ----------\n vector : list, array, Categorical, or Series\n Vector of \"categorical\" values\n order : list-like, optional\n Desired order of category levels to override the order determined\n from the ``values`` object.\n\n Returns\n -------\n order : list\n Ordered list of category levels not including null values.\n\n \"\"\"\n if order is None:\n if hasattr(vector, \"categories\"):\n order = vector.categories\n else:\n try:\n order = vector.cat.categories\n except (TypeError, AttributeError):\n\n try:\n order = vector.unique()\n except AttributeError:\n order = pd.unique(vector)\n\n if variable_type(vector) == \"numeric\":\n order = np.sort(order)\n\n order = filter(pd.notnull, order)\n return list(order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_oldcore.py", "rel_fname": "seaborn/_oldcore.py", "line": 1773, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(vector) == \"numeric\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 40, "name": "KDE", "kind": "def", "category": "class", "info": 
"__init__\t_define_support_grid\t_define_support_univariate\t_define_support_bivariate\tdefine_support\t_fit\t_eval_univariate\t_eval_bivariate\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 88, "name": "_define_support_grid", "kind": "def", "category": "function", "info": " def _define_support_grid(self, x, bw, cut, clip, gridsize):\n \"\"\"Create the grid of evaluation points depending for vector x.\"\"\"\n clip_lo = -np.inf if clip[0] is None else clip[0]\n clip_hi = +np.inf if clip[1] is None else clip[1]\n gridmin = max(x.min() - bw * cut, clip_lo)\n gridmax = min(x.max() + bw * cut, clip_hi)\n return np.linspace(gridmin, gridmax, gridsize)\n\n def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 96, "name": "_define_support_univariate", "kind": "def", "category": "function", "info": " def _define_support_univariate(self, x, weights):\n \"\"\"Create a 1D grid of evaluation points.\"\"\"\n kde = self._fit(x, weights)\n bw = np.sqrt(kde.covariance.squeeze())\n grid = self._define_support_grid(\n x, bw, self.cut, self.clip, self.gridsize\n )\n return grid\n\n def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 98, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 100, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid = self._define_support_grid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", 
"rel_fname": "seaborn/_statistics.py", "line": 105, "name": "_define_support_bivariate", "kind": "def", "category": "function", "info": " def _define_support_bivariate(self, x1, x2, weights):\n \"\"\"Create a 2D grid of evaluation points.\"\"\"\n clip = self.clip\n if clip[0] is None or np.isscalar(clip[0]):\n clip = (clip, clip)\n\n kde = self._fit([x1, x2], weights)\n bw = np.sqrt(np.diag(kde.covariance).squeeze())\n\n grid1 = self._define_support_grid(\n x1, bw[0], self.cut, clip[0], self.gridsize\n )\n grid2 = self._define_support_grid(\n x2, bw[1], self.cut, clip[1], self.gridsize\n )\n\n return grid1, grid2\n\n def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 111, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 114, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid1 = self._define_support_grid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 117, "name": "_define_support_grid", "kind": "ref", "category": "function", "info": " grid2 = self._define_support_grid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 123, "name": "define_support", "kind": "def", 
"category": "function", "info": " def define_support(self, x1, x2=None, weights=None, cache=True):\n \"\"\"Create the evaluation grid for a given data set.\"\"\"\n if x2 is None:\n support = self._define_support_univariate(x1, weights)\n else:\n support = self._define_support_bivariate(x1, x2, weights)\n\n if cache:\n self.support = support\n\n return support\n\n def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 126, "name": "_define_support_univariate", "kind": "ref", "category": "function", "info": " support = self._define_support_univariate(x1, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 128, "name": "_define_support_bivariate", "kind": "ref", "category": "function", "info": " support = self._define_support_bivariate(x1, x2, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 135, "name": "_fit", "kind": "def", "category": "function", "info": " def _fit(self, fit_data, weights=None):\n \"\"\"Fit the scipy kde while adding bw_adjust logic and version check.\"\"\"\n fit_kws = {\"bw_method\": self.bw_method}\n if weights is not None:\n fit_kws[\"weights\"] = weights\n\n kde = gaussian_kde(fit_data, **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n 
def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 146, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights=None):\n \"\"\"Fit and evaluate a univariate on univariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x, cache=False)\n\n kde = self._fit(x, weights)\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([\n kde.integrate_box_1d(s_0, s_i) for s_i in support\n ])\n else:\n density = kde(support)\n\n return density, support\n\n def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 150, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x, cache=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 152, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(x, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 160, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde(support)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 164, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights=None):\n \"\"\"Fit and evaluate a univariate on bivariate data.\"\"\"\n support = self.support\n if support is None:\n support = 
self.define_support(x1, x2, cache=False)\n\n kde = self._fit([x1, x2], weights)\n\n if self.cumulative:\n\n grid1, grid2 = support\n density = np.zeros((grid1.size, grid2.size))\n p0 = grid1.min(), grid2.min()\n for i, xi in enumerate(grid1):\n for j, xj in enumerate(grid2):\n density[i, j] = kde.integrate_box(p0, (xi, xj))\n\n else:\n\n xx1, xx2 = np.meshgrid(*support)\n density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n\n return density, support\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Fit and evaluate on univariate or bivariate data.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 168, "name": "define_support", "kind": "ref", "category": "function", "info": " support = self.define_support(x1, x2, cache=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 170, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit([x1, x2], weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 184, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 191, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 193, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 198, "name": "Histogram", "kind": "def", "category": "class", "info": "__init__\t_define_bin_edges\tdefine_bin_params\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 242, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", stat_choices, stat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 253, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " def _define_bin_edges(self, x, weights, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n if binrange is None:\n start, stop = x.min(), x.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n # Handle roundoff error (maybe there is a less clumsy way?)\n if bin_edges.max() < stop or len(bin_edges) < 2:\n bin_edges = np.append(bin_edges, bin_edges.max() + step)\n else:\n bin_edges = np.histogram_bin_edges(\n x, bins, binrange, weights,\n )\n return bin_edges\n\n def define_bin_params(self, x1, x2=None, 
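The `_eval_bivariate` method shown above evaluates the fitted KDE on a meshgrid for densities, and falls back to a double loop of `integrate_box` calls for the cumulative case. A self-contained sketch of both branches (sample data and grid sizes are illustrative; note the cumulative branch costs one numerical integration per grid cell):

```python
import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(1)
x1, x2 = rng.normal(size=(2, 300))
kde = gaussian_kde([x1, x2])

grid1 = np.linspace(x1.min(), x1.max(), 25)
grid2 = np.linspace(x2.min(), x2.max(), 25)

# Density surface: evaluate on the flattened mesh, then reshape.
xx1, xx2 = np.meshgrid(grid1, grid2)
density = kde([xx1.ravel(), xx2.ravel()]).reshape(xx1.shape)

# Cumulative surface: integrate from the grid origin to each grid point.
p0 = grid1.min(), grid2.min()
cdf = np.array([
    [kde.integrate_box(p0, (xi, xj)) for xj in grid2]
    for xi in grid1
])
```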
weights=None, cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 274, "name": "define_bin_params", "kind": "def", "category": "function", "info": " def define_bin_params(self, x1, x2=None, weights=None, 
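`Histogram._define_bin_edges` above has three branches: unit bins centered on integers for discrete data, an explicit `binwidth` with a roundoff guard, and delegation to `np.histogram_bin_edges` otherwise. A standalone mirror of that logic (the function name is illustrative):

```python
import numpy as np

def define_bin_edges(x, bins=10, binwidth=None, binrange=None, discrete=False):
    start, stop = (x.min(), x.max()) if binrange is None else binrange
    if discrete:
        # Integer data: unit-width bins centered on each integer value.
        return np.arange(start - .5, stop + 1.5)
    if binwidth is not None:
        edges = np.arange(start, stop + binwidth, binwidth)
        # Guard against float roundoff leaving the last datum uncovered.
        if edges.max() < stop or len(edges) < 2:
            edges = np.append(edges, edges.max() + binwidth)
        return edges
    return np.histogram_bin_edges(x, bins, binrange)

print(define_bin_edges(np.array([0, 1, 1, 3]), discrete=True))
# [-0.5  0.5  1.5  2.5  3.5]
```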
cache=True):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n if x2 is None:\n\n bin_edges = self._define_bin_edges(\n x1, weights, self.bins, self.binwidth, self.binrange, self.discrete,\n )\n\n if isinstance(self.bins, (str, Number)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n else:\n\n bin_edges = []\n for i, x in enumerate([x1, x2]):\n\n # Resolve out whether bin parameters are shared\n # or specific to each variable\n\n bins = self.bins\n if not bins or isinstance(bins, (str, Number)):\n pass\n elif isinstance(bins[i], str):\n bins = bins[i]\n elif len(bins) == 2:\n bins = bins[i]\n\n binwidth = self.binwidth\n if binwidth is None:\n pass\n elif not isinstance(binwidth, Number):\n binwidth = binwidth[i]\n\n binrange = self.binrange\n if binrange is None:\n pass\n elif not isinstance(binrange[0], Number):\n binrange = binrange[i]\n\n discrete = self.discrete\n if not isinstance(discrete, bool):\n discrete = discrete[i]\n\n # Define the bins for this variable\n\n bin_edges.append(self._define_bin_edges(\n x, weights, bins, binwidth, binrange, discrete,\n ))\n\n bin_kws = dict(bins=tuple(bin_edges))\n\n if cache:\n self.bin_kws = bin_kws\n\n return bin_kws\n\n def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 278, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 323, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges.append(self._define_bin_edges(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 334, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def _eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 338, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x1, x2, cache=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 352, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 354, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 356, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / area\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": 
"seaborn/_statistics.py", "line": 366, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 370, "name": "define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 378, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 380, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / hist.sum() * 100\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 382, "name": "astype", "kind": "ref", "category": "function", "info": " hist = hist.astype(float) / np.diff(bin_edges)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 395, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 397, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 400, "name": "ECDF", "kind": "def", "category": "class", "info": "__init__\t_eval_bivariate\t_eval_univariate\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 413, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", [\"count\", \"percent\", \"proportion\"], stat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 417, "name": "_eval_bivariate", "kind": "def", "category": "function", "info": " def 
_eval_bivariate(self, x1, x2, weights):\n \"\"\"Inner function for histogram of two variables.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x1, x2, cache=False)\n\n density = self.stat == \"density\"\n\n hist, *bin_edges = np.histogram2d(\n x1, x2, **bin_kws, weights=weights, density=density\n )\n\n area = np.outer(\n np.diff(bin_edges[0]),\n np.diff(bin_edges[1]),\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / area\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * area).cumsum(axis=0).cumsum(axis=1)\n else:\n hist = hist.cumsum(axis=0).cumsum(axis=1)\n\n return hist, bin_edges\n\n def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 421, "name": "_eval_univariate", "kind": "def", "category": "function", "info": " def _eval_univariate(self, x, weights):\n \"\"\"Inner function for histogram of one variable.\"\"\"\n bin_kws = self.bin_kws\n if bin_kws is None:\n bin_kws = self.define_bin_params(x, weights=weights, cache=False)\n\n density = self.stat == \"density\"\n hist, bin_edges = np.histogram(\n x, **bin_kws, weights=weights, density=density,\n )\n\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / np.diff(bin_edges)\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * np.diff(bin_edges)).cumsum()\n else:\n hist = hist.cumsum()\n\n return hist, bin_edges\n\n def __call__(self, x1, x2=None, weights=None):\n \"\"\"Count the occurrences in each bin, maybe normalize.\"\"\"\n if x2 is None:\n return self._eval_univariate(x1, weights)\n else:\n return self._eval_bivariate(x1, x2, weights)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 450, "name": "_eval_univariate", "kind": "ref", "category": "function", "info": " return self._eval_univariate(x1, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": 
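The `ECDF` class record above only lists its methods and the allowed stats (`"count"`, `"percent"`, `"proportion"`); its implementation is not captured in these records. A minimal weighted ECDF sketch consistent with those stat options (all names here are assumptions, not seaborn's internals):

```python
import numpy as np

def ecdf(x, weights=None, stat="proportion"):
    x = np.asarray(x, dtype=float)
    w = np.ones_like(x) if weights is None else np.asarray(weights, dtype=float)
    order = np.argsort(x)
    x, w = x[order], w[order]
    y = w.cumsum()                  # stat="count": cumulative weight
    if stat in ("proportion", "percent"):
        y = y / y[-1]
        if stat == "percent":
            y = y * 100
    return x, y

xs, ys = ecdf([3.0, 1.0, 2.0])
print(ys)  # [0.333... 0.666... 1.]
```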
"seaborn/_statistics.py", "line": 452, "name": "_eval_bivariate", "kind": "ref", "category": "function", "info": " return self._eval_bivariate(x1, x2, weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 455, "name": "EstimateAggregator", "kind": "def", "category": "class", "info": "__init__\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 475, "name": "_validate_errorbar_arg", "kind": "ref", "category": "function", "info": " method, level = _validate_errorbar_arg(errorbar)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 487, "name": "estimator", "kind": "ref", "category": "function", "info": " estimate = self.estimator(vals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 489, "name": "agg", "kind": "ref", "category": "function", "info": " estimate = vals.agg(self.estimator)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 499, "name": "error_method", "kind": "ref", "category": "function", "info": " err_min, err_max = self.error_method(vals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 506, "name": "sem", "kind": "ref", "category": "function", "info": " half_interval = vals.sem() * self.error_level\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 511, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(vals, self.error_level)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 514, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = bootstrap(vals, units=units, func=self.estimator, **self.boot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 515, "name": "_percentile_interval", "kind": "ref", "category": "function", "info": " err_min, err_max = _percentile_interval(boots, self.error_level)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 520, "name": "_percentile_interval", "kind": "def", "category": "function", "info": "def _percentile_interval(data, width):\n \"\"\"Return a percentile interval from data of a given width.\"\"\"\n edge = (100 - width) / 2\n percentiles = edge, 100 - edge\n return np.nanpercentile(data, percentiles)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 527, "name": "_validate_errorbar_arg", "kind": "def", "category": "function", "info": "def _validate_errorbar_arg(arg):\n \"\"\"Check type and value of errorbar argument and assign default level.\"\"\"\n DEFAULT_LEVELS = {\n \"ci\": 95,\n \"pi\": 95,\n \"se\": 1,\n \"sd\": 1,\n }\n\n usage = \"`errorbar` must be a callable, string, or (string, number) tuple\"\n\n if arg 
is None:\n return None, None\n elif callable(arg):\n return arg, None\n elif isinstance(arg, str):\n method = arg\n level = DEFAULT_LEVELS.get(method, None)\n else:\n try:\n method, level = arg\n except (ValueError, TypeError) as err:\n raise err.__class__(usage) from err\n\n _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n if level is not None and not isinstance(level, Number):\n raise TypeError(usage)\n\n return method, level\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_statistics.py", "rel_fname": "seaborn/_statistics.py", "line": 551, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"errorbar\", list(DEFAULT_LEVELS), method)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 15, "name": "Agg", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 52, "name": "Est", "kind": "def", "category": "class", "info": "_process\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 84, "name": "_process", "kind": "def", "category": "function", "info": " def _process(\n self, data: DataFrame, var: str, estimator: EstimateAggregator\n ) -> DataFrame:\n # Needed because GroupBy.apply assumes func is DataFrame -> DataFrame\n # which we could probably make more general to allow Series return\n res = estimator(data, var)\n return pd.DataFrame([res])\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n boot_kws = {\"n_boot\": self.n_boot, \"seed\": self.seed}\n engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n\n var = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res = (\n groupby\n .apply(data, self._process, var, engine)\n .dropna(subset=[var])\n .reset_index(drop=True)\n )\n\n res = res.fillna({f\"{var}min\": res[var], f\"{var}max\": res[var]})\n\n return res\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 89, "name": "estimator", "kind": "ref", "category": "function", "info": " res = estimator(data, var)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 97, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " engine = EstimateAggregator(self.func, self.errorbar, **boot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/aggregation.py", "rel_fname": "seaborn/_stats/aggregation.py", "line": 113, "name": "Rolling", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 15, "name": "Stat", "kind": "def", "category": "class", "info": "_check_param_one_of\t_check_grouping_vars\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 32, "name": "_check_param_one_of", "kind": "def", "category": "function", "info": " def _check_param_one_of(self, param: 
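`_validate_errorbar_arg` above normalizes the `errorbar` argument into a `(method, level)` pair, filling in default levels for the string shorthands. A standalone mirror showing the accepted forms (the module-level constant and function name are illustrative):

```python
from numbers import Number

DEFAULT_LEVELS = {"ci": 95, "pi": 95, "se": 1, "sd": 1}

def validate_errorbar_arg(arg):
    usage = "`errorbar` must be a callable, string, or (string, number) tuple"
    if arg is None:
        return None, None
    if callable(arg):
        return arg, None
    if isinstance(arg, str):
        method, level = arg, DEFAULT_LEVELS.get(arg)
    else:
        try:
            method, level = arg
        except (ValueError, TypeError) as err:
            raise err.__class__(usage) from err
    if method not in DEFAULT_LEVELS:
        raise ValueError(f"`errorbar` method must be one of {list(DEFAULT_LEVELS)}")
    if level is not None and not isinstance(level, Number):
        raise TypeError(usage)
    return method, level

print(validate_errorbar_arg("ci"))        # ('ci', 95)
print(validate_errorbar_arg(("pi", 50)))  # ('pi', 50)
```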
str, options: Iterable[Any]) -> None:\n \"\"\"Raise when parameter value is not one of a specified set.\"\"\"\n value = getattr(self, param)\n if value not in options:\n *most, last = options\n option_str = \", \".join(f\"{x!r}\" for x in most[:-1]) + f\" or {last!r}\"\n err = \" \".join([\n f\"The `{param}` parameter for `{self.__class__.__name__}` must be\",\n f\"one of {option_str}; not {value!r}.\",\n ])\n raise ValueError(err)\n\n def _check_grouping_vars(\n self, param: str, data_vars: list[str], stacklevel: int = 2,\n ) -> None:\n \"\"\"Warn if vars are named in parameter without being present in the data.\"\"\"\n param_vars = getattr(self, param)\n undefined = set(param_vars) - set(data_vars)\n if undefined:\n param = f\"{self.__class__.__name__}.{param}\"\n names = \", \".join(f\"{x!r}\" for x in undefined)\n msg = f\"Undefined variable(s) passed for {param}: {names}.\"\n warnings.warn(msg, stacklevel=stacklevel)\n\n def __call__(\n self,\n data: DataFrame,\n groupby: GroupBy,\n orient: str,\n scales: dict[str, Scale],\n ) -> DataFrame:\n \"\"\"Apply statistical transform to data subgroups and return combined result.\"\"\"\n return data\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/base.py", "rel_fname": "seaborn/_stats/base.py", "line": 44, "name": "_check_grouping_vars", "kind": "def", "category": "function", "info": " def _check_grouping_vars(\n self, param: str, data_vars: list[str], stacklevel: int = 2,\n ) -> None:\n \"\"\"Warn if vars are named in parameter without being present in the data.\"\"\"\n param_vars = getattr(self, param)\n undefined = set(param_vars) - set(data_vars)\n if undefined:\n param = f\"{self.__class__.__name__}.{param}\"\n names = \", \".join(f\"{x!r}\" for x in undefined)\n msg = f\"Undefined variable(s) passed for {param}: {names}.\"\n warnings.warn(msg, stacklevel=stacklevel)\n\n def __call__(\n self,\n data: DataFrame,\n groupby: GroupBy,\n orient: str,\n scales: dict[str, Scale],\n ) -> DataFrame:\n \"\"\"Apply statistical transform to data subgroups and return combined result.\"\"\"\n return data\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 18, "name": "Count", "kind": "def", "category": "class", "info": "__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 49, "name": "Hist", "kind": "def", "category": "class", "info": "__post_init__\t_define_bin_edges\t_define_bin_params\t_get_bins_and_eval\t_eval\t_normalize\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 114, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n stat_options = [\n \"count\", \"density\", \"percent\", \"probability\", \"proportion\", \"frequency\"\n ]\n self._check_param_one_of(\"stat\", stat_options)\n\n def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n vals = vals.dropna()\n\n if binrange is None:\n start, stop = vals.min(), vals.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n else:\n bin_edges = 
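`Stat._check_grouping_vars` above warns rather than raises when a grouping parameter names variables absent from the data. A minimal standalone equivalent (the function and argument names are illustrative):

```python
import warnings

def check_grouping_vars(owner, param, param_vars, data_vars):
    # Warn about names requested in `param` that the data does not define.
    undefined = set(param_vars) - set(data_vars)
    if undefined:
        names = ", ".join(f"{x!r}" for x in undefined)
        warnings.warn(f"Undefined variable(s) passed for {owner}.{param}: {names}.")

check_grouping_vars("Hist", "common_bins", ["hue", "size"], ["hue"])
# UserWarning: Undefined variable(s) passed for Hist.common_bins: 'size'.
```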
np.histogram_bin_edges(vals, bins, binrange, weight)\n\n # TODO warning or cap on too many bins?\n\n return bin_edges\n\n def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weights, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 119, "name": "_check_param_one_of", "kind": "ref", "category": "function", "info": " self._check_param_one_of(\"stat\", stat_options)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 121, "name": "_define_bin_edges", "kind": "def", "category": "function", "info": " 
def _define_bin_edges(self, vals, weight, bins, binwidth, binrange, discrete):\n \"\"\"Inner function that takes bin parameters as arguments.\"\"\"\n vals = vals.dropna()\n\n if binrange is None:\n start, stop = vals.min(), vals.max()\n else:\n start, stop = binrange\n\n if discrete:\n bin_edges = np.arange(start - .5, stop + 1.5)\n elif binwidth is not None:\n step = binwidth\n bin_edges = np.arange(start, stop + step, step)\n else:\n bin_edges = np.histogram_bin_edges(vals, bins, binrange, weight)\n\n # TODO warning or cap on too many bins?\n\n return bin_edges\n\n def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weights, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": 
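`Hist._eval` above histograms one column and returns tidy bar geometry: bin centers, counts, and widths (the `"space"` column). A sketch of the same transform (the function name and `"x"` column label are illustrative):

```python
import numpy as np
import pandas as pd

def eval_hist(vals, bins=10, weights=None, density=False):
    hist, edges = np.histogram(vals, bins=bins, weights=weights, density=density)
    width = np.diff(edges)
    center = edges[:-1] + width / 2
    return pd.DataFrame({"x": center, "count": hist, "space": width})

df = eval_hist(np.random.default_rng(4).normal(size=200), bins=12)
print(df.head(3))
```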
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 142, "name": "_define_bin_params", "kind": "def", "category": "function", "info": " def _define_bin_params(self, data, orient, scale_type):\n \"\"\"Given data, return numpy.histogram parameters to define bins.\"\"\"\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n # TODO We'll want this for ordinal / discrete scales too\n # (Do we need discrete as a parameter or just infer from scale?)\n discrete = self.discrete or scale_type == \"nominal\"\n\n bin_edges = self._define_bin_edges(\n vals, weights, self.bins, self.binwidth, self.binrange, discrete,\n )\n\n if isinstance(self.bins, (str, int)):\n n_bins = len(bin_edges) - 1\n bin_range = bin_edges.min(), bin_edges.max()\n bin_kws = dict(bins=n_bins, range=bin_range)\n else:\n bin_kws = dict(bins=bin_edges)\n\n return bin_kws\n\n def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 151, "name": "_define_bin_edges", "kind": "ref", "category": "function", "info": " bin_edges = self._define_bin_edges(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 
164, "name": "_get_bins_and_eval", "kind": "def", "category": "function", "info": " def _get_bins_and_eval(self, data, orient, groupby, scale_type):\n\n bin_kws = self._define_bin_params(data, orient, scale_type)\n return groupby.apply(data, self._eval, orient, bin_kws)\n\n def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 166, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 169, "name": "_eval", "kind": "def", "category": "function", "info": " def _eval(self, data, orient, bin_kws):\n\n vals = data[orient]\n weights = data.get(\"weight\", None)\n\n density = self.stat == \"density\"\n hist, edges = np.histogram(vals, **bin_kws, weights=weights, density=density)\n\n width = np.diff(edges)\n center = edges[:-1] + width / 2\n\n return pd.DataFrame({orient: center, \"count\": hist, \"space\": width})\n\n def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n 
hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 182, "name": "_normalize", "kind": "def", "category": "function", "info": " def _normalize(self, data):\n\n hist = data[\"count\"]\n if self.stat == \"probability\" or self.stat == \"proportion\":\n hist = hist.astype(float) / hist.sum()\n elif self.stat == \"percent\":\n hist = hist.astype(float) / hist.sum() * 100\n elif self.stat == \"frequency\":\n hist = hist.astype(float) / data[\"space\"]\n\n if self.cumulative:\n if self.stat in [\"density\", \"frequency\"]:\n hist = (hist * data[\"space\"]).cumsum()\n else:\n hist = hist.cumsum()\n\n return data.assign(**{self.stat: hist})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n scale_type = scales[orient].__class__.__name__.lower()\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_bins is True:\n bin_kws = self._define_bin_params(data, orient, scale_type)\n data = groupby.apply(data, self._eval, orient, bin_kws)\n else:\n if self.common_bins is False:\n bin_groupby = GroupBy(grouping_vars)\n else:\n bin_groupby = GroupBy(self.common_bins)\n self._check_grouping_vars(\"common_bins\", grouping_vars)\n\n data = bin_groupby.apply(\n data, self._get_bins_and_eval, orient, groupby, scale_type,\n )\n\n if not grouping_vars or self.common_norm is True:\n data = self._normalize(data)\n else:\n if self.common_norm is False:\n norm_groupby = GroupBy(grouping_vars)\n else:\n norm_groupby = GroupBy(self.common_norm)\n self._check_grouping_vars(\"common_norm\", grouping_vars)\n data = norm_groupby.apply(data, self._normalize)\n\n other = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return data.assign(**{other: data[self.stat]})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 207, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = self._define_bin_params(data, orient, scale_type)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 211, 
"name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = GroupBy(grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 213, "name": "GroupBy", "kind": "ref", "category": "function", "info": " bin_groupby = GroupBy(self.common_bins)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 214, "name": "_check_grouping_vars", "kind": "ref", "category": "function", "info": " self._check_grouping_vars(\"common_bins\", grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 221, "name": "_normalize", "kind": "ref", "category": "function", "info": " data = self._normalize(data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 224, "name": "GroupBy", "kind": "ref", "category": "function", "info": " norm_groupby = GroupBy(grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 226, "name": "GroupBy", "kind": "ref", "category": "function", "info": " norm_groupby = GroupBy(self.common_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/counting.py", "rel_fname": "seaborn/_stats/counting.py", "line": 227, "name": "_check_grouping_vars", "kind": "ref", "category": "function", "info": " self._check_grouping_vars(\"common_norm\", grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 21, "name": "KDE", "kind": "def", "category": "class", "info": "__post_init__\t_check_var_list_or_boolean\t_fit\t_get_support\t_fit_and_evaluate\t_transform\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 93, "name": "__post_init__", "kind": "def", "category": "function", "info": " def __post_init__(self):\n\n if self.cumulative and _no_scipy:\n raise RuntimeError(\"Cumulative KDE evaluation requires scipy\")\n\n def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:\n \"\"\"Do input checks on grouping parameters.\"\"\"\n value = getattr(self, param)\n if not (\n isinstance(value, bool)\n or (isinstance(value, list) and all(isinstance(v, str) for v in value))\n ):\n param_name = f\"{self.__class__.__name__}.{param}\"\n raise TypeError(f\"{param_name} must be a boolean or list of strings.\")\n self._check_grouping_vars(param, grouping_vars, stacklevel=3)\n\n def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n \"\"\"Fit and return a KDE object.\"\"\"\n # TODO need to handle singular data\n\n fit_kws: dict[str, Any] = {\"bw_method\": self.bw_method}\n if \"weight\" in data:\n fit_kws[\"weights\"] = data[\"weight\"]\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n 
gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 98, "name": "_check_var_list_or_boolean", "kind": "def", "category": "function", "info": " def _check_var_list_or_boolean(self, param: str, grouping_vars: Any) -> None:\n \"\"\"Do input checks on grouping parameters.\"\"\"\n value = getattr(self, param)\n if not (\n isinstance(value, bool)\n or (isinstance(value, list) and all(isinstance(v, str) for v in value))\n ):\n param_name = f\"{self.__class__.__name__}.{param}\"\n raise TypeError(f\"{param_name} must be a boolean or list of strings.\")\n self._check_grouping_vars(param, grouping_vars, 
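`KDE._fit_and_evaluate` above guards the degenerate cases that `gaussian_kde` cannot handle: fewer than two observations, and singular (e.g. constant) data, which raises `LinAlgError`. A standalone mirror of that pattern (function name and column labels illustrative):

```python
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde

def fit_and_evaluate(vals, support, weights=None, cumulative=False):
    empty = pd.DataFrame(columns=["x", "density"], dtype=float)
    if len(vals) < 2:
        return empty
    try:
        kde = gaussian_kde(vals, weights=weights)
    except np.linalg.LinAlgError:  # singular data, e.g. all values equal
        return empty
    if cumulative:
        density = np.array([kde.integrate_box_1d(support[0], s) for s in support])
    else:
        density = kde(support)
    return pd.DataFrame({"x": support, "density": density})

support = np.linspace(-3, 3, 101)
curve = fit_and_evaluate(np.random.default_rng(6).normal(size=150), support)
flat = fit_and_evaluate(np.zeros(150), support)  # singular -> empty frame
```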
stacklevel=3)\n\n def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n \"\"\"Fit and return a KDE object.\"\"\"\n # TODO need to handle singular data\n\n fit_kws: dict[str, Any] = {\"bw_method\": self.bw_method}\n if \"weight\" in data:\n fit_kws[\"weights\"] = data[\"weight\"]\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 107, "name": "_check_grouping_vars", "kind": "ref", "category": "function", "info": " self._check_grouping_vars(param, grouping_vars, stacklevel=3)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 109, "name": "_fit", "kind": "def", "category": "function", "info": " def _fit(self, data: DataFrame, orient: str) -> gaussian_kde:\n \"\"\"Fit and return a KDE object.\"\"\"\n # TODO need to handle singular data\n\n fit_kws: dict[str, Any] = {\"bw_method\": self.bw_method}\n if \"weight\" in data:\n fit_kws[\"weights\"] = data[\"weight\"]\n kde = gaussian_kde(data[orient], **fit_kws)\n kde.set_bandwidth(kde.factor * self.bw_adjust)\n\n return kde\n\n def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n 
else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 121, "name": "_get_support", "kind": "def", "category": "function", "info": " def _get_support(self, data: DataFrame, orient: str) -> ndarray:\n \"\"\"Define the grid that the KDE will be evaluated on.\"\"\"\n if self.gridsize is None:\n return data[orient].to_numpy()\n\n kde = self._fit(data, orient)\n bw = np.sqrt(kde.covariance.squeeze())\n gridmin = data[orient].min() - bw * self.cut\n gridmax = data[orient].max() + bw * self.cut\n return np.linspace(gridmin, gridmax, self.gridsize)\n\n def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n 
data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 126, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(data, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 132, "name": "_fit_and_evaluate", "kind": "def", "category": "function", "info": " def _fit_and_evaluate(\n self, data: DataFrame, orient: str, support: ndarray\n ) -> DataFrame:\n \"\"\"Transform single group by fitting a KDE and evaluating on a support grid.\"\"\"\n empty = pd.DataFrame(columns=[orient, \"weight\", \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n kde = self._fit(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n if self.cumulative:\n s_0 = support[0]\n density = np.array([kde.integrate_box_1d(s_0, s_i) for s_i in support])\n else:\n density = kde(support)\n\n weight = data[\"weight\"].sum()\n return pd.DataFrame({orient: support, \"weight\": weight, \"density\": density})\n\n def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 140, "name": "_fit", "kind": "ref", "category": "function", "info": " kde = self._fit(data, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 148, "name": "kde", "kind": "ref", "category": "function", "info": " density = kde(support)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 153, "name": "_transform", "kind": "def", "category": "function", "info": " def _transform(\n self, data: DataFrame, orient: str, grouping_vars: list[str]\n ) -> DataFrame:\n \"\"\"Transform multiple groups by fitting KDEs and evaluating.\"\"\"\n empty = pd.DataFrame(columns=[*data.columns, \"density\"], dtype=float)\n if len(data) < 2:\n return empty\n try:\n support = self._get_support(data, orient)\n except np.linalg.LinAlgError:\n return empty\n\n grouping_vars = [x for x in grouping_vars if data[x].nunique() > 1]\n if not grouping_vars:\n return self._fit_and_evaluate(data, orient, support)\n groupby = GroupBy(grouping_vars)\n return groupby.apply(data, self._fit_and_evaluate, orient, support)\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n if \"weight\" not in data:\n data = data.assign(weight=1)\n data = data.dropna(subset=[orient, \"weight\"])\n\n # Transform each group separately\n grouping_vars = [str(v) for v in data if v in groupby.order]\n if not grouping_vars or self.common_grid is True:\n res = self._transform(data, orient, grouping_vars)\n else:\n if self.common_grid is False:\n grid_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n grid_vars = [v for v in self.common_grid if v in grouping_vars]\n\n res = (\n GroupBy(grid_vars)\n .apply(data, self._transform, orient, grouping_vars)\n )\n\n # Normalize, potentially within groups\n if not grouping_vars or self.common_norm is True:\n res = res.assign(group_weight=data[\"weight\"].sum())\n else:\n if self.common_norm is False:\n norm_vars = grouping_vars\n else:\n self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n norm_vars = [v for v in self.common_norm if v in grouping_vars]\n\n res = res.join(\n data.groupby(norm_vars)[\"weight\"].sum().rename(\"group_weight\"),\n on=norm_vars,\n )\n\n res[\"density\"] *= res.eval(\"weight / group_weight\")\n value = {\"x\": \"y\", \"y\": \"x\"}[orient]\n res[value] = res[\"density\"]\n return res.drop([\"weight\", \"group_weight\"], axis=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 161, "name": "_get_support", "kind": "ref", "category": "function", "info": " support = self._get_support(data, orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 167, "name": "_fit_and_evaluate", "kind": "ref", "category": "function", "info": " return self._fit_and_evaluate(data, orient, support)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 168, "name": "GroupBy", "kind": "ref", "category": "function", "info": " groupby = GroupBy(grouping_vars)\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 182, "name": "_transform", "kind": "ref", "category": "function", "info": " res = self._transform(data, orient, grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 187, "name": "_check_var_list_or_boolean", "kind": "ref", "category": "function", "info": " self._check_var_list_or_boolean(\"common_grid\", grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 191, "name": "GroupBy", "kind": "ref", "category": "function", "info": " GroupBy(grid_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/density.py", "rel_fname": "seaborn/_stats/density.py", "line": 202, "name": "_check_var_list_or_boolean", "kind": "ref", "category": "function", "info": " self._check_var_list_or_boolean(\"common_norm\", grouping_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 37, "name": "Perc", "kind": "def", "category": "class", "info": "_percentile\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 61, "name": "_percentile", "kind": "def", "category": "function", "info": " def _percentile(self, data: DataFrame, var: str) -> DataFrame:\n\n k = list(np.linspace(0, 100, self.k)) if isinstance(self.k, int) else self.k\n method = cast(_MethodKind, self.method)\n values = data[var].dropna()\n if _version_predates(np, \"1.22\"):\n res = np.percentile(values, k, interpolation=method) # type: ignore\n else:\n res = np.percentile(data[var].dropna(), k, method=method)\n return DataFrame({var: res, \"percentile\": k})\n\n def __call__(\n self, data: DataFrame, groupby: GroupBy, orient: str, scales: dict[str, Scale],\n ) -> DataFrame:\n\n var = {\"x\": \"y\", \"y\": \"x\"}[orient]\n return groupby.apply(data, self._percentile, var)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/order.py", "rel_fname": "seaborn/_stats/order.py", "line": 66, "name": "_version_predates", "kind": "ref", "category": "function", "info": " if _version_predates(np, \"1.22\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 10, "name": "PolyFit", "kind": "def", "category": "class", "info": "_fit_predict\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 21, "name": "_fit_predict", "kind": "def", "category": "function", "info": " def _fit_predict(self, data):\n\n x = data[\"x\"]\n y = data[\"y\"]\n if x.nunique() <= self.order:\n # TODO warn?\n xx = yy = []\n else:\n p = np.polyfit(x, y, self.order)\n xx = np.linspace(x.min(), x.max(), self.gridsize)\n yy = np.polyval(p, xx)\n\n return pd.DataFrame(dict(x=xx, y=yy))\n\n # TODO we should have a way of identifying the method that will be applied\n # and then only define __call__ on a base-class of stats with this pattern\n\n def __call__(self, data, groupby, orient, scales):\n\n return (\n groupby\n .apply(data.dropna(subset=[\"x\", \"y\"]), 
self._fit_predict)\n )\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 25, "name": "nunique", "kind": "ref", "category": "function", "info": " if x.nunique() <= self.order:\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 42, "name": "apply", "kind": "ref", "category": "function", "info": " .apply(data.dropna(subset=[\"x\", \"y\"]), self._fit_predict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 42, "name": "dropna", "kind": "ref", "category": "function", "info": " .apply(data.dropna(subset=[\"x\", \"y\"]), self._fit_predict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/_stats/regression.py", "rel_fname": "seaborn/_stats/regression.py", "line": 47, "name": "OLSFit", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 5, "name": "bootstrap", "kind": "def", "category": "function", "info": "def bootstrap(*args, **kwargs):\n \"\"\"Resample one or more arrays with replacement and store aggregate values.\n\n Positional arguments are a sequence of arrays to bootstrap along the first\n axis and pass to a summary function.\n\n Keyword arguments:\n n_boot : int, default=10000\n Number of iterations\n axis : int, default=None\n Will pass axis to ``func`` as a keyword argument.\n units : array, default=None\n Array of sampling unit IDs. When used the bootstrap resamples units\n and then observations within units instead of individual\n datapoints.\n func : string or callable, default=\"mean\"\n Function to call on the args that are passed in. If string, uses as\n name of function in the numpy namespace. 
If nans are present in the\n data, will try to use nan-aware version of named function.\n seed : Generator | SeedSequence | RandomState | int | None\n Seed for the random number generator; useful if you want\n reproducible resamples.\n\n Returns\n -------\n boot_dist: array\n array of bootstrapped statistic values\n\n \"\"\"\n # Ensure list of arrays are same length\n if len(np.unique(list(map(len, args)))) > 1:\n raise ValueError(\"All input arrays must have the same length\")\n n = len(args[0])\n\n # Default keyword arguments\n n_boot = kwargs.get(\"n_boot\", 10000)\n func = kwargs.get(\"func\", \"mean\")\n axis = kwargs.get(\"axis\", None)\n units = kwargs.get(\"units\", None)\n random_seed = kwargs.get(\"random_seed\", None)\n if random_seed is not None:\n msg = \"`random_seed` has been renamed to `seed` and will be removed\"\n warnings.warn(msg)\n seed = kwargs.get(\"seed\", random_seed)\n if axis is None:\n func_kwargs = dict()\n else:\n func_kwargs = dict(axis=axis)\n\n # Initialize the resampler\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n rng = np.random.default_rng(seed)\n\n # Coerce to arrays\n args = list(map(np.asarray, args))\n if units is not None:\n units = np.asarray(units)\n\n if isinstance(func, str):\n\n # Allow named numpy functions\n f = getattr(np, func)\n\n # Try to use nan-aware version of function if necessary\n missing_data = np.isnan(np.sum(np.column_stack(args)))\n\n if missing_data and not func.startswith(\"nan\"):\n nanf = getattr(np, f\"nan{func}\", None)\n if nanf is None:\n msg = f\"Data contain nans but no nan-aware version of `{func}` found\"\n warnings.warn(msg, UserWarning)\n else:\n f = nanf\n\n else:\n f = func\n\n # Handle numpy changes\n try:\n integers = rng.integers\n except AttributeError:\n integers = rng.randint\n\n # Do the bootstrap\n if units is not None:\n return _structured_bootstrap(args, n_boot, units, f,\n func_kwargs, integers)\n\n boot_dist = []\n for i in range(int(n_boot)):\n resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n sample = [a.take(resampler, axis=0) for a in args]\n boot_dist.append(f(*sample, **func_kwargs))\n return np.array(boot_dist)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 58, "name": "default_rng", "kind": "ref", "category": "function", "info": " rng = np.random.default_rng(seed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 92, "name": "_structured_bootstrap", "kind": "ref", "category": "function", "info": " return _structured_bootstrap(args, n_boot, units, f,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 97, "name": "integers", "kind": "ref", "category": "function", "info": " resampler = integers(0, n, n, dtype=np.intp) # intp is indexing dtype\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 99, "name": "f", "kind": "ref", "category": "function", "info": " boot_dist.append(f(*sample, **func_kwargs))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 103, "name": "_structured_bootstrap", "kind": "def", "category": "function", "info": "def _structured_bootstrap(args, n_boot, units, func, 
func_kwargs, integers):\n    \"\"\"Resample units instead of datapoints.\"\"\"\n    unique_units = np.unique(units)\n    n_units = len(unique_units)\n\n    args = [[a[units == unit] for unit in unique_units] for a in args]\n\n    boot_dist = []\n    for i in range(int(n_boot)):\n        resampler = integers(0, n_units, n_units, dtype=np.intp)\n        sample = [[a[i] for i in resampler] for a in args]\n        lengths = map(len, sample[0])\n        resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n        sample = [[c.take(r, axis=0) for c, r in zip(a, resampler)] for a in sample]\n        sample = list(map(np.concatenate, sample))\n        boot_dist.append(func(*sample, **func_kwargs))\n    return np.array(boot_dist)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 112, "name": "integers", "kind": "ref", "category": "function", "info": "        resampler = integers(0, n_units, n_units, dtype=np.intp)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 115, "name": "integers", "kind": "ref", "category": "function", "info": "        resampler = [integers(0, n, n, dtype=np.intp) for n in lengths]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/algorithms.py", "rel_fname": "seaborn/algorithms.py", "line": 118, "name": "func", "kind": "ref", "category": "function", "info": "        boot_dist.append(func(*sample, **func_kwargs))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 26, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 31, "name": "_BaseGrid", "kind": "def", "category": "class", "info": "set\tfig\tfigure\tapply\tpipe\tsavefig"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 42, "name": "fig", "kind": "def", "category": "function", "info": "    def fig(self):\n        \"\"\"DEPRECATED: prefer the `figure` property.\"\"\"\n        # Grid.figure is preferred because it matches the Axes attribute name.\n        # But as the maintenance burden on having this property is minimal,\n        # let's be slow about formally deprecating it. For now just note its deprecation\n        # in the docstring; add a warning in version 0.13, and eventually remove it.\n        return self._figure\n\n    @property\n    def figure(self):\n        \"\"\"Access the :class:`matplotlib.figure.Figure` object underlying the grid.\"\"\"\n        return self._figure\n\n    def apply(self, func, *args, **kwargs):\n        \"\"\"\n        Pass the grid to a user-supplied function and return self.\n\n        The `func` must accept an object of this type for its first\n        positional argument. Additional arguments are passed through.\n        The return value of `func` is ignored; this method returns self.\n        See the `pipe` method if you want the return value.\n\n        Added in v0.12.0.\n\n        \"\"\"\n        func(self, *args, **kwargs)\n        return self\n\n    def pipe(self, func, *args, **kwargs):\n        \"\"\"\n        Pass the grid to a user-supplied function and return its value.\n\n        The `func` must accept an object of this type for its first\n        positional argument. 
Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 55, "name": "apply", "kind": "def", "category": "function", "info": " def apply(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return self.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` is ignored; this method returns self.\n See the `pipe` method if you want the return value.\n\n Added in v0.12.0.\n\n \"\"\"\n func(self, *args, **kwargs)\n return self\n\n def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 67, "name": "func", "kind": "ref", "category": "function", "info": " func(self, *args, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 70, "name": "pipe", "kind": "def", "category": "function", "info": " def pipe(self, func, *args, **kwargs):\n \"\"\"\n Pass the grid to a user-supplied function and return its value.\n\n The `func` must accept an object of this type for its first\n positional argument. Additional arguments are passed through.\n The return value of `func` becomes the return value of this method.\n See the `apply` method if you want to return self instead.\n\n Added in v0.12.0.\n\n \"\"\"\n return func(self, *args, **kwargs)\n\n def savefig(self, *args, **kwargs):\n \"\"\"\n Save an image of the plot.\n\n This wraps :meth:`matplotlib.figure.Figure.savefig`, using bbox_inches=\"tight\"\n by default. 
Parameters are passed through to the matplotlib function.\n\n \"\"\"\n kwargs = kwargs.copy()\n kwargs.setdefault(\"bbox_inches\", \"tight\")\n self.figure.savefig(*args, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 82, "name": "func", "kind": "ref", "category": "function", "info": " return func(self, *args, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 97, "name": "Grid", "kind": "def", "category": "class", "info": "__init__\ttight_layout\tadd_legend\t_update_legend_data\t_get_palette\tlegend\ttick_params"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 120, "name": "add_legend", "kind": "def", "category": "function", "info": " def add_legend(self, legend_data=None, title=None, label_order=None,\n adjust_subtitles=False, **kwargs):\n \"\"\"Draw a legend, maybe placing it outside axes and resizing the figure.\n\n Parameters\n ----------\n legend_data : dict\n Dictionary mapping label names (or two-element tuples where the\n second element is a label name) to matplotlib artist handles. The\n default reads from ``self._legend_data``.\n title : string\n Title for the legend. The default reads from ``self._hue_var``.\n label_order : list of labels\n The order that the legend entries should appear in. The default\n reads from ``self.hue_names``.\n adjust_subtitles : bool\n If True, modify entries with invisible artists to left-align\n the labels and set the font size to that of a title.\n kwargs : key, value pairings\n Other keyword arguments are passed to the underlying legend methods\n on the Figure or Axes object.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n # Find the data for the legend\n if legend_data is None:\n legend_data = self._legend_data\n if label_order is None:\n if self.hue_names is None:\n label_order = list(legend_data.keys())\n else:\n label_order = list(map(utils.to_utf8, self.hue_names))\n\n blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n handles = [legend_data.get(l, blank_handle) for l in label_order]\n title = self._hue_var if title is None else title\n title_size = mpl.rcParams[\"legend.title_fontsize\"]\n\n # Unpack nested labels from a hierarchical legend\n labels = []\n for entry in label_order:\n if isinstance(entry, tuple):\n _, label = entry\n else:\n label = entry\n labels.append(label)\n\n # Set default legend kwargs\n kwargs.setdefault(\"scatterpoints\", 1)\n\n if self._legend_out:\n\n kwargs.setdefault(\"frameon\", False)\n kwargs.setdefault(\"loc\", \"center right\")\n\n # Draw a full-figure legend outside the grid\n figlegend = self._figure.legend(handles, labels, **kwargs)\n\n self._legend = figlegend\n figlegend.set_title(title, prop={\"size\": title_size})\n\n if adjust_subtitles:\n adjust_legend_subtitles(figlegend)\n\n # Draw the plot to set the bounding boxes correctly\n _draw_figure(self._figure)\n\n # Calculate and set the new width of the figure so the legend fits\n legend_width = figlegend.get_window_extent().width / self._figure.dpi\n fig_width, fig_height = self._figure.get_size_inches()\n self._figure.set_size_inches(fig_width + legend_width, fig_height)\n\n # Draw the plot again to get the new transformations\n _draw_figure(self._figure)\n\n # Now calculate how much space we need on the right side\n 
legend_width = figlegend.get_window_extent().width / self._figure.dpi\n space_needed = legend_width / (fig_width + legend_width)\n margin = .04 if self._margin_titles else .01\n self._space_needed = margin + space_needed\n right = 1 - self._space_needed\n\n # Place the subplot axes to give space for the legend\n self._figure.subplots_adjust(right=right)\n self._tight_layout_rect[2] = right\n\n else:\n # Draw a legend in the first axis\n ax = self.axes.flat[0]\n kwargs.setdefault(\"loc\", \"best\")\n\n leg = ax.legend(handles, labels, **kwargs)\n leg.set_title(title, prop={\"size\": title_size})\n self._legend = leg\n\n if adjust_subtitles:\n adjust_legend_subtitles(leg)\n\n return self\n\n def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = get_legend_handles(ax.legend_)\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 157, "name": "Patch", "kind": "ref", "category": "function", "info": " blank_handle = mpl.patches.Patch(alpha=0, linewidth=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 183, "name": "set_title", "kind": "ref", "category": "function", "info": " figlegend.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": 
"seaborn/axisgrid.py", "line": 186, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(figlegend)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 189, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 192, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 193, "name": "get_size_inches", "kind": "ref", "category": "function", "info": " fig_width, fig_height = self._figure.get_size_inches()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 194, "name": "set_size_inches", "kind": "ref", "category": "function", "info": " self._figure.set_size_inches(fig_width + legend_width, fig_height)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 197, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(self._figure)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 200, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " legend_width = figlegend.get_window_extent().width / self._figure.dpi\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 216, "name": "set_title", "kind": "ref", "category": "function", "info": " leg.set_title(title, prop={\"size\": title_size})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 220, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(leg)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 224, "name": "_update_legend_data", "kind": "def", "category": "function", "info": " def _update_legend_data(self, ax):\n \"\"\"Extract the legend data from an axes object and save it.\"\"\"\n data = {}\n\n # Get data directly from the legend, which is necessary\n # for newer functions that don't add labeled proxy artists\n if ax.legend_ is not None and self._extract_legend_handles:\n handles = get_legend_handles(ax.legend_)\n labels = [t.get_text() for t in ax.legend_.texts]\n data.update({l: h for h, l in zip(handles, labels)})\n\n handles, labels = ax.get_legend_handles_labels()\n data.update({l: h for h, l in zip(handles, labels)})\n\n self._legend_data.update(data)\n\n # Now clear the legend\n ax.legend_ = None\n\n def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = 
utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 231, "name": "get_legend_handles", "kind": "ref", "category": "function", "info": " handles = get_legend_handles(ax.legend_)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 232, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in ax.legend_.texts]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 235, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, labels = ax.get_legend_handles_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 243, "name": "_get_palette", "kind": "def", "category": "function", "info": " def _get_palette(self, data, hue, hue_order, palette):\n \"\"\"Get a list of colors for the hue variable.\"\"\"\n if hue is None:\n palette = color_palette(n_colors=1)\n\n else:\n hue_names = categorical_order(data[hue], hue_order)\n n_colors = len(hue_names)\n\n # By default use either the current color palette or HUSL\n if palette is None:\n current_palette = utils.get_color_cycle()\n if n_colors > len(current_palette):\n colors = color_palette(\"husl\", n_colors)\n else:\n colors = color_palette(n_colors=n_colors)\n\n # Allow for palette to map from hue variable names\n elif isinstance(palette, dict):\n color_names = [palette[h] for h in hue_names]\n colors = color_palette(color_names, n_colors)\n\n # Otherwise act as if we just got a list of colors\n else:\n colors = color_palette(palette, n_colors)\n\n palette = color_palette(colors, n_colors)\n\n return palette\n\n @property\n def legend(self):\n \"\"\"The :class:`matplotlib.legend.Legend` object, if present.\"\"\"\n try:\n return self._legend\n except AttributeError:\n return None\n\n def tick_params(self, axis='both', **kwargs):\n \"\"\"Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n 
:meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \"\"\"\n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 246, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(n_colors=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 249, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 254, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 256, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(\"husl\", n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 258, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 263, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(color_names, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 267, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 269, "name": "color_palette", "kind": "ref", "category": "function", "info": " palette = color_palette(colors, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 362, "name": "FacetGrid", "kind": "def", "category": "class", "info": "__init__\tfacet_data\tmap\tmap_dataframe\t_facet_color\t_facet_plot\t_finalize_grid\tfacet_axis\tdespine\tset_axis_labels\tset_xlabels\tset_ylabels\tset_xticklabels\tset_yticklabels\tset_titles\trefline\taxes\tax\taxes_dict\t_inner_axes\t_left_axes\t_not_left_axes\t_bottom_axes\t_not_bottom_axes"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 382, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 384, "name": "_get_palette", "kind": "ref", "category": "function", "info": " colors = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 390, "name": "categorical_order", "kind": "ref", "category": 
"function", "info": " row_names = categorical_order(data[row], row_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 395, "name": "categorical_order", "kind": "ref", "category": "function", "info": " col_names = categorical_order(data[col], col_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 445, "name": "_disable_autolayout", "kind": "ref", "category": "function", "info": " with _disable_autolayout():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 475, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[0] = fig.add_subplot(nrow, ncol, 1, **subplot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 481, "name": "add_subplot", "kind": "ref", "category": "function", "info": " axes[i] = fig.add_subplot(nrow, ncol, i + 1, **subplot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 524, "name": "set_titles", "kind": "ref", "category": "function", "info": " self.set_titles()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 528, "name": "despine", "kind": "ref", "category": "function", "info": " self.despine()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 532, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 533, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 534, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 535, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 539, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 540, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 541, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 542, "name": "set_visible", "kind": "ref", "category": "function", 
"info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 636, "name": "facet_data", "kind": "def", "category": "function", "info": " def facet_data(self):\n \"\"\"Generator for name indices and data subsets for each facet.\n\n Yields\n ------\n (i, j, k), data_ijk : tuple of ints, DataFrame\n The ints provide an index into the {row, col, hue}_names attribute,\n and the dataframe contains a subset of the full data corresponding\n to each facet. The generator yields subsets that correspond with\n the self.axes.flat iterator, or self.axes[i, j] when `col_wrap`\n is None.\n\n \"\"\"\n data = self.data\n\n # Construct masks for the row variable\n if self.row_names:\n row_masks = [data[self._row_var] == n for n in self.row_names]\n else:\n row_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the column variable\n if self.col_names:\n col_masks = [data[self._col_var] == n for n in self.col_names]\n else:\n col_masks = [np.repeat(True, len(self.data))]\n\n # Construct masks for the hue variable\n if self.hue_names:\n hue_masks = [data[self._hue_var] == n for n in self.hue_names]\n else:\n hue_masks = [np.repeat(True, len(self.data))]\n\n # Here is the main generator loop\n for (i, row), (j, col), (k, hue) in product(enumerate(row_masks),\n enumerate(col_masks),\n enumerate(hue_masks)):\n data_ijk = data[row & col & hue & self._not_na]\n yield (i, j, k), data_ijk\n\n def map(self, func, *args, **kwargs):\n \"\"\"Apply a plotting function to each facet's subset of the data.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. It\n must plot to the currently active matplotlib Axes and take a\n `color` keyword argument. If faceting on the `hue` dimension,\n it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # How we use the function depends on where it comes from\n func_module = str(getattr(func, \"__module__\", \"\"))\n\n # Check for categorical plots without order information\n if func_module == \"seaborn.categorical\":\n if \"order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n if len(args) == 3 and \"hue_order\" not in kwargs:\n warning = (\"Using the {} function without specifying \"\n \"`hue_order` is likely to produce an incorrect \"\n \"plot.\".format(func.__name__))\n warnings.warn(warning)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not func_module.startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n\n # Get the actual data we are going to plot with\n plot_data = data_ijk[list(args)]\n if self._dropna:\n plot_data = plot_data.dropna()\n plot_args = [v for k, v in plot_data.items()]\n\n # Some matplotlib functions don't handle pandas objects correctly\n if func_module.startswith(\"matplotlib\"):\n plot_args = [v.values for v in plot_args]\n\n # Draw the plot\n self._facet_plot(func, ax, plot_args, kwargs)\n\n # Finalize the annotations and layout\n self._finalize_grid(args[:2])\n\n return self\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 719, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 727, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 730, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 738, "name": "to_utf8", "kind": "ref", "category": "function", "info": " kwargs[\"label\"] = utils.to_utf8(self.hue_names[hue_k])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 743, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 751, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, plot_args, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 754, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(args[:2])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 758, "name": "map_dataframe", "kind": "def", "category": "function", "info": " def map_dataframe(self, func, *args, **kwargs):\n \"\"\"Like ``.map`` but passes args as strings and inserts data in kwargs.\n\n This method is suitable for plotting with functions that accept a\n long-form DataFrame as a `data` keyword argument and access the\n data in that DataFrame using string variable names.\n\n Parameters\n ----------\n func : callable\n A plotting function that takes data and keyword arguments. Unlike\n the `map` method, a function used here must \"understand\" Pandas\n objects. It also must plot to the currently active matplotlib Axes\n and take a `color` keyword argument. If faceting on the `hue`\n dimension, it must also take a `label` keyword argument.\n args : strings\n Column names in self.data that identify variables with data to\n plot. 
The data for each variable is passed to `func` in the\n order the variables are specified in the call.\n kwargs : keyword arguments\n All keyword arguments are passed to the plotting function.\n\n Returns\n -------\n self : object\n Returns self.\n\n \"\"\"\n\n # If color was a keyword argument, grab it here\n kw_color = kwargs.pop(\"color\", None)\n\n # Iterate over the data subsets\n for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n\n # If this subset is null, move on\n if not data_ijk.values.size:\n continue\n\n # Get the current axis\n modify_state = not str(func.__module__).startswith(\"seaborn\")\n ax = self.facet_axis(row_i, col_j, modify_state)\n\n # Decide what color to plot with\n kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n\n # Insert the other hue aesthetics if appropriate\n for kw, val_list in self.hue_kws.items():\n kwargs[kw] = val_list[hue_k]\n\n # Insert a label in the keyword arguments for the legend\n if self._hue_var is not None:\n kwargs[\"label\"] = self.hue_names[hue_k]\n\n # Stick the facet dataframe into the kwargs\n if self._dropna:\n data_ijk = data_ijk.dropna()\n kwargs[\"data\"] = data_ijk\n\n # Draw the plot\n self._facet_plot(func, ax, args, kwargs)\n\n # For axis labels, prefer to use positional args for backcompat\n # but also extract the x/y kwargs and use if no corresponding arg\n axis_labels = [kwargs.get(\"x\", None), kwargs.get(\"y\", None)]\n for i, val in enumerate(args[:2]):\n axis_labels[i] = val\n self._finalize_grid(axis_labels)\n\n return self\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 791, "name": "facet_data", "kind": "ref", "category": "function", "info": " for (row_i, col_j, hue_k), data_ijk in self.facet_data():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 799, "name": "facet_axis", "kind": "ref", "category": "function", "info": " ax = self.facet_axis(row_i, col_j, modify_state)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 802, "name": "_facet_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = self._facet_color(hue_k, kw_color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 814, "name": "dropna", "kind": "ref", "category": "function", "info": " data_ijk = data_ijk.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 818, "name": "_facet_plot", "kind": "ref", "category": "function", "info": " self._facet_plot(func, ax, args, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 825, "name": "_finalize_grid", "kind": "ref", "category": "function", "info": " self._finalize_grid(axis_labels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 829, "name": "_facet_color", "kind": "def", "category": "function", "info": " def _facet_color(self, hue_index, kw_color):\n\n color = self._colors[hue_index]\n if kw_color is not None:\n return kw_color\n elif color is not None:\n return color\n\n def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis 
on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 837, "name": "_facet_plot", "kind": "def", "category": "function", "info": " def _facet_plot(self, func, ax, plot_args, plot_kwargs):\n\n # Draw the plot\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs = plot_kwargs.copy()\n semantics = [\"x\", \"y\", \"hue\", \"size\", \"style\"]\n for key, val in zip(semantics, plot_args):\n plot_kwargs[key] = val\n plot_args = []\n plot_kwargs[\"ax\"] = ax\n func(*plot_args, **plot_kwargs)\n\n # Sort out the supporting information\n self._update_legend_data(ax)\n\n def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} 
and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 847, "name": "func", "kind": "ref", "category": "function", "info": " func(*plot_args, **plot_kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 850, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 852, "name": "_finalize_grid", "kind": "def", "category": "function", "info": " def _finalize_grid(self, axlabels):\n \"\"\"Finalize the annotations and layout.\"\"\"\n self.set_axis_labels(*axlabels)\n self.tight_layout()\n\n def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template 
for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 854, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " self.set_axis_labels(*axlabels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 857, "name": "facet_axis", "kind": "def", "category": "function", "info": " def facet_axis(self, row_i, col_j, modify_state=True):\n \"\"\"Make the axis identified by these indices active and return it.\"\"\"\n\n # Calculate the actual indices of the axes to plot on\n if self._col_wrap is not None:\n ax = self.axes.flat[col_j]\n else:\n ax = self.axes[row_i, col_j]\n\n # Get a reference to the axes object we want, and make it active\n if modify_state:\n plt.sca(ax)\n return ax\n\n def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. 
Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
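The `facet_axis` method defined in the record above switches between flat and 2-D indexing depending on whether `col_wrap` is set, because a wrapped grid stores its axes as a 1-D sequence. A small sketch, again with invented data:

```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({"x": range(6), "y": range(6), "grp": list("abcabc")})

# With col_wrap the facets are laid out in a wrapped 1-D sequence, so
# facet_axis ignores row_i and indexes axes.flat[col_j] instead.
g = sns.FacetGrid(df, col="grp", col_wrap=2)
g.map(plt.scatter, "x", "y")
ax = g.facet_axis(0, 2)   # the third facet ("grp = c")
ax.set_title("selected via facet_axis")
```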
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 871, "name": "despine", "kind": "def", "category": "function", "info": " def despine(self, **kwargs):\n \"\"\"Remove axis spines from the facets.\"\"\"\n utils.despine(self._figure, **kwargs)\n return self\n\n def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
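`refline` simply folds `color` and `linestyle` into `line_kws` and maps `axvline`/`axhline` across the grid, so any extra keyword passes straight through to matplotlib. A sketch (note: `refline` only exists in recent seaborn releases, so treat its availability as an assumption about the installed version):

```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({"x": [0, 1, 2, 3], "y": [3, 1, 4, 1], "c": list("aabb")})

g = sns.FacetGrid(df, col="c")
g.map(plt.scatter, "x", "y")

# One vertical and one horizontal reference line on every facet;
# linewidth is forwarded to axvline/axhline via **line_kws.
g.refline(x=1.5, y=2.0, color="red", linestyle=":", linewidth=1)
```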
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 873, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(self._figure, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 876, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, x_var=None, y_var=None, clear_inner=True, **kwargs):\n \"\"\"Set axis labels on the left column and bottom row of the grid.\"\"\"\n if x_var is not None:\n self._x_var = x_var\n self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n if y_var is not None:\n self._y_var = y_var\n self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n\n return self\n\n def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
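Because `set_axis_labels` and the other setters in these records all `return self`, grid configuration chains naturally; a brief sketch with invented columns:

```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({"x": [0, 1, 2, 3], "y": [3, 1, 4, 1], "c": list("aabb")})

g = (
    sns.FacetGrid(df, col="c")
    .map(plt.scatter, "x", "y")
    .set_axis_labels("time (s)", "amplitude")  # bottom row / left column only
    .set_titles("{col_name}")
)
```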
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 880, "name": "set_xlabels", "kind": "ref", "category": "function", "info": " self.set_xlabels(x_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 883, "name": "set_ylabels", "kind": "ref", "category": "function", "info": " self.set_ylabels(y_var, clear_inner=clear_inner, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 887, "name": "set_xlabels", "kind": "def", "category": "function", "info": " def set_xlabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the x axis on the bottom row of the grid.\"\"\"\n if label is None:\n label = self._x_var\n for ax in self._bottom_axes:\n ax.set_xlabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_bottom_axes:\n ax.set_xlabel(\"\")\n return self\n\n def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
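`set_xlabels` writes the label only on the bottom row, via the `_bottom_axes` property defined later in each record, and blanks the inner x labels unless told otherwise. A sketch of the `clear_inner` switch, under the same invented-data assumptions as above:

```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({
    "x": range(8), "y": range(8),
    "r": list("aabb") * 2, "c": list("cd") * 4,
})

g = sns.FacetGrid(df, row="r", col="c")
g.map(plt.scatter, "x", "y")

# Relabel the bottom row; with clear_inner=False, any labels already present
# on inner facets are left in place instead of being reset to "".
g.set_xlabels("measurement", clear_inner=False)
```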
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 892, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 895, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(\"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 898, "name": "set_ylabels", "kind": "def", "category": "function", "info": " def set_ylabels(self, label=None, clear_inner=True, **kwargs):\n \"\"\"Label the y axis on the left column of the grid.\"\"\"\n if label is None:\n label = self._y_var\n for ax in self._left_axes:\n ax.set_ylabel(label, **kwargs)\n if clear_inner:\n for ax in self._not_left_axes:\n ax.set_ylabel(\"\")\n return self\n\n def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
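Under `col_wrap` the grid is flat, and `_left_axes`/`_not_left_axes` pick the left column with index arithmetic alone: an axes sits in the left column exactly when its flat index is a multiple of `ncol`. The same rule checked standalone with numpy (the grid shape is invented):

```python
import numpy as np

ncol, n_facets = 3, 7        # e.g. col_wrap=3 with 7 facets
idx = np.arange(n_facets)

left = idx[idx % ncol == 0]        # flat indices of the left column
not_left = idx[idx % ncol != 0]
print(left)      # [0 3 6]
print(not_left)  # [1 2 4 5]
```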
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 903, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 906, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(\"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 909, "name": "set_xticklabels", "kind": "def", "category": "function", "info": " def set_xticklabels(self, labels=None, step=None, **kwargs):\n \"\"\"Set x axis tick labels of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_xticks()\n ax.set_xticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n if step is not None:\n xticks = ax.get_xticks()[::step]\n curr_labels = curr_labels[::step]\n ax.set_xticks(xticks)\n ax.set_xticklabels(curr_labels, **kwargs)\n else:\n ax.set_xticklabels(labels, **kwargs)\n return self\n\n def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
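`set_xticklabels` first re-applies the current tick locations with `set_xticks` (pinning them so the labels cannot drift under a changing locator) and, when `step` is given, keeps every `step`-th tick and label. For example:

```python
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.DataFrame({"x": range(10), "y": range(10), "c": list("ab") * 5})

g = sns.FacetGrid(df, col="c")
g.map(plt.scatter, "x", "y")

# Keep every other x tick; rotation is forwarded to the matplotlib
# Axes.set_xticklabels call on each facet.
g.set_xticklabels(step=2, rotation=45)
```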
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 912, "name": "get_xticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_xticks()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 913, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(curr_ticks)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 915, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 915, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_xticklabels()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 917, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = ax.get_xticks()[::step]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 919, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(xticks)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 920, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 922, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(labels, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 925, "name": "set_yticklabels", "kind": "def", "category": "function", "info": " def set_yticklabels(self, labels=None, **kwargs):\n \"\"\"Set y axis tick labels on the left column of the grid.\"\"\"\n for ax in self.axes.flat:\n curr_ticks = ax.get_yticks()\n ax.set_yticks(curr_ticks)\n if labels is None:\n curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n ax.set_yticklabels(curr_labels, **kwargs)\n else:\n ax.set_yticklabels(labels, **kwargs)\n return self\n\n def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
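To make the `set_titles` template mechanics concrete outside the grid: the default templates are composed exactly as in the method above, and each title is produced with ordinary `str.format`. The variable values below are invented for illustration:

row_template = "{row_var} = {row_name}"
col_template = "{col_var} = {col_name}"
# When both facet dimensions are assigned, the per-facet template
# joins the two defaults with " | ", as in set_titles above.
template = " | ".join([row_template, col_template])

title = template.format(row_var="sex", row_name="Male",
                        col_var="time", col_name="Dinner")
assert title == "sex = Male | time = Dinner"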
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 928, "name": "get_yticks", "kind": "ref", "category": "function", "info": " curr_ticks = ax.get_yticks()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 929, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(curr_ticks)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "get_text", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 931, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " curr_labels = [l.get_text() for l in ax.get_yticklabels()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 932, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(curr_labels, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 934, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(labels, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 937, "name": "set_titles", "kind": "def", "category": "function", "info": " def set_titles(self, template=None, row_template=None, col_template=None,\n **kwargs):\n \"\"\"Draw titles either above each facet or on the grid margins.\n\n Parameters\n ----------\n template : string\n Template for all titles with the formatting keys {col_var} and\n {col_name} (if using a `col` faceting variable) and/or {row_var}\n and {row_name} (if using a `row` faceting variable).\n row_template:\n Template for the row variable when titles are drawn on the grid\n margins. Must have {row_var} and {row_name} formatting keys.\n col_template:\n Template for the column variable when titles are drawn on the grid\n margins. 
Must have {col_var} and {col_name} formatting keys.\n\n Returns\n -------\n self: object\n Returns self.\n\n \"\"\"\n args = dict(row_var=self._row_var, col_var=self._col_var)\n kwargs[\"size\"] = kwargs.pop(\"size\", mpl.rcParams[\"axes.labelsize\"])\n\n # Establish default templates\n if row_template is None:\n row_template = \"{row_var} = {row_name}\"\n if col_template is None:\n col_template = \"{col_var} = {col_name}\"\n if template is None:\n if self._row_var is None:\n template = col_template\n elif self._col_var is None:\n template = row_template\n else:\n template = \" | \".join([row_template, col_template])\n\n row_template = utils.to_utf8(row_template)\n col_template = utils.to_utf8(col_template)\n template = utils.to_utf8(template)\n\n if self._margin_titles:\n\n # Remove any existing title texts\n for text in self._margin_titles_texts:\n text.remove()\n self._margin_titles_texts = []\n\n if self.row_names is not None:\n # Draw the row titles on the right edge of the grid\n for i, row_name in enumerate(self.row_names):\n ax = self.axes[i, -1]\n args.update(dict(row_name=row_name))\n title = row_template.format(**args)\n text = ax.annotate(\n title, xy=(1.02, .5), xycoords=\"axes fraction\",\n rotation=270, ha=\"left\", va=\"center\",\n **kwargs\n )\n self._margin_titles_texts.append(text)\n\n if self.col_names is not None:\n # Draw the column titles as normal titles\n for j, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = col_template.format(**args)\n self.axes[0, j].set_title(title, **kwargs)\n\n return self\n\n # Otherwise title each facet with all the necessary information\n if (self._row_var is not None) and (self._col_var is not None):\n for i, row_name in enumerate(self.row_names):\n for j, col_name in enumerate(self.col_names):\n args.update(dict(row_name=row_name, col_name=col_name))\n title = template.format(**args)\n self.axes[i, j].set_title(title, **kwargs)\n elif self.row_names is not None and len(self.row_names):\n for i, row_name in enumerate(self.row_names):\n args.update(dict(row_name=row_name))\n title = template.format(**args)\n self.axes[i, 0].set_title(title, **kwargs)\n elif self.col_names is not None and len(self.col_names):\n for i, col_name in enumerate(self.col_names):\n args.update(dict(col_name=col_name))\n title = template.format(**args)\n # Index the flat array so col_wrap works\n self.axes.flat[i].set_title(title, **kwargs)\n return self\n\n def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
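A usage sketch for `refline` as defined above. It assumes a seaborn release that ships `FacetGrid.refline` (0.11.2 or later) and network access for `load_dataset`:

import seaborn as sns
import matplotlib.pyplot as plt

tips = sns.load_dataset("tips")
g = sns.FacetGrid(tips, col="time")
g.map(plt.scatter, "total_bill", "tip")
# One dashed grey vertical line at x=20 and one horizontal line at
# y=4 on every facet; refline returns the grid, so calls can chain.
g.refline(x=20, y=4)
plt.show()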
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 976, "name": "to_utf8", "kind": "ref", "category": "function", "info": " row_template = utils.to_utf8(row_template)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 977, "name": "to_utf8", "kind": "ref", "category": "function", "info": " col_template = utils.to_utf8(col_template)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 978, "name": "to_utf8", "kind": "ref", "category": "function", "info": " template = utils.to_utf8(template)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1005, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[0, j].set_title(title, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1015, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, j].set_title(title, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1020, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes[i, 0].set_title(title, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1026, "name": "set_title", "kind": "ref", "category": "function", "info": " self.axes.flat[i].set_title(title, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1029, "name": "refline", "kind": "def", "category": "function", "info": " def refline(self, *, x=None, y=None, color='.5', linestyle='--', **line_kws):\n \"\"\"Add a reference line(s) to each facet.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s). 
Pass ``color=None`` to\n use ``hue`` mapping.\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`FacetGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n self.map(plt.axvline, x=x, **line_kws)\n\n if y is not None:\n self.map(plt.axhline, y=y, **line_kws)\n\n return self\n\n # ------ Properties that are part of the public API and documented by Sphinx\n\n @property\n def axes(self):\n \"\"\"An array of the :class:`matplotlib.axes.Axes` objects in the grid.\"\"\"\n return self._axes\n\n @property\n def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, 
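The access rules encoded by the `ax` and `axes_dict` properties above, as a short sketch (again assuming a seaborn version where `axes_dict` is available):

import seaborn as sns

tips = sns.load_dataset("tips")

# No faceting variables: the grid is 1x1 and .ax returns that Axes.
g = sns.FacetGrid(tips)
g.ax.set_title("single facet")

# With faceting, .ax raises AttributeError; use .axes_dict instead.
# Keys are level names for one facet dimension, (row, col) tuples
# when both dimensions are assigned.
g2 = sns.FacetGrid(tips, col="time")
for level, ax in g2.axes_dict.items():
    ax.set_title(f"time = {level}")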
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1071, "name": "ax", "kind": "def", "category": "function", "info": " def ax(self):\n \"\"\"The :class:`matplotlib.axes.Axes` when no faceting variables are assigned.\"\"\"\n if self.axes.shape == (1, 1):\n return self.axes[0, 0]\n else:\n err = (\n \"Use the `.axes` attribute when facet variables are assigned.\"\n )\n raise AttributeError(err)\n\n @property\n def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1082, "name": "axes_dict", "kind": "def", "category": "function", "info": " def axes_dict(self):\n \"\"\"A mapping of facet names to corresponding :class:`matplotlib.axes.Axes`.\n\n If only one of ``row`` or ``col`` is assigned, each key is a string\n representing a level of that variable. 
If both facet dimensions are\n assigned, each key is a ``({row_level}, {col_level})`` tuple.\n\n \"\"\"\n return self._axes_dict\n\n # ------ Private properties, that require some computation to get\n\n @property\n def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1095, "name": "_inner_axes", "kind": "def", "category": "function", "info": " def _inner_axes(self):\n \"\"\"Return a flat array of the inner axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, 1:].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i % self._ncol\n and i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if 
self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1113, "name": "_left_axes", "kind": "def", "category": "function", "info": " def _left_axes(self):\n \"\"\"Return a flat array of the left column of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 0].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if not i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1125, "name": "_not_left_axes", "kind": "def", "category": "function", "info": " def _not_left_axes(self):\n \"\"\"Return a flat array of axes that aren't on the left column.\"\"\"\n if self._col_wrap is None:\n return self.axes[:, 1:].flat\n else:\n axes = []\n for i, ax in enumerate(self.axes):\n if i % self._ncol:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat 
array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1137, "name": "_bottom_axes", "kind": "def", "category": "function", "info": " def _bottom_axes(self):\n \"\"\"Return a flat array of the bottom row of axes.\"\"\"\n if self._col_wrap is None:\n return self.axes[-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i >= (self._ncol * (self._nrow - 1))\n or i >= (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n @property\n def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1154, "name": "_not_bottom_axes", "kind": "def", "category": "function", "info": " def _not_bottom_axes(self):\n \"\"\"Return a flat array of axes that aren't on the bottom row.\"\"\"\n if self._col_wrap is None:\n return self.axes[:-1, :].flat\n else:\n axes = []\n n_empty = self._nrow * self._ncol - self._n_facets\n for i, ax in enumerate(self.axes):\n append = (\n i < (self._ncol * (self._nrow - 1))\n and i < (self._ncol * (self._nrow - 1) - n_empty)\n )\n if append:\n axes.append(ax)\n return np.array(axes, object).flat\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1171, "name": "PairGrid", "kind": "def", "category": "class", "info": "__init__\tmap\tmap_lower\tmap_upper\tmap_offdiag\tmap_diag\t_map_diag_iter_hue\t_map_bivariate\t_plot_bivariate\t_plot_bivariate_iter_hue\t_add_axis_labels\t_find_numeric_cols"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1244, "name": "_find_numeric_cols", "kind": "ref", "category": "function", "info": " numeric_cols = self._find_numeric_cols(data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1272, "name": "_disable_autolayout", "kind": "ref", "category": "function", "info": " with _disable_autolayout():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1303, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1320, "name": "categorical_order", "kind": "ref", "category": "function", 
"info": " hue_names = hue_order = categorical_order(data[hue], hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1332, "name": "_get_palette", "kind": "ref", "category": "function", "info": " self.palette = self._get_palette(data, hue, hue_order, palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1339, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_xticklabels():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1340, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1341, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1342, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.xaxis.label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1347, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " for label in ax.get_yticklabels():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1348, "name": "set_visible", "kind": "ref", "category": "function", "info": " label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1349, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.offsetText.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1350, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1356, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(fig=fig)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1372, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1376, "name": "map_lower", "kind": "def", "category": "function", "info": " def map_lower(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the lower diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.tril_indices_from(self.axes, -1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n 
ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n 
data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1388, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1391, "name": "map_upper", "kind": "def", "category": "function", "info": " def map_upper(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the upper diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n indices = zip(*np.triu_indices_from(self.axes, 1))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. 
Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. 
This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1403, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1406, "name": "map_offdiag", "kind": 
"def", "category": "function", "info": " def map_offdiag(self, func, **kwargs):\n \"\"\"Plot with a bivariate function on the off-diagonal subplots.\n\n Parameters\n ----------\n func : callable plotting function\n Must take x, y arrays as positional arguments and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n if self.square_grid:\n self.map_lower(func, **kwargs)\n if not self._corner:\n self.map_upper(func, **kwargs)\n else:\n indices = []\n for i, (y_var) in enumerate(self.y_vars):\n for j, (x_var) in enumerate(self.x_vars):\n if x_var != y_var:\n indices.append((i, j))\n self._map_bivariate(func, indices, **kwargs)\n return self\n\n def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n 
try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n 
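The `_map_bivariate`/`_plot_bivariate` code embedded above dispatches on whether the plotting callable declares a `hue` parameter. A self-contained sketch of that `inspect.signature` check; `my_plotter` is a hypothetical matplotlib-style callable:

```python
from inspect import signature

import seaborn as sns

def my_plotter(x, y, color=None, label=None):
    """Hypothetical matplotlib-style callable with no ``hue`` parameter."""

for func in (sns.scatterplot, my_plotter):
    print(func.__name__, "hue" in signature(func).parameters)
# scatterplot True  -> one call, with hue/hue_order/palette passed through
# my_plotter False  -> _plot_bivariate_iter_hue loops over hue levels instead
```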
ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1418, "name": "map_lower", "kind": "ref", "category": "function", "info": " self.map_lower(func, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1420, "name": "map_upper", "kind": "ref", "category": "function", "info": " self.map_upper(func, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1427, "name": "_map_bivariate", "kind": "ref", "category": "function", "info": " self._map_bivariate(func, indices, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1430, "name": "map_diag", "kind": "def", "category": "function", "info": " def map_diag(self, func, **kwargs):\n \"\"\"Plot with a univariate function on each diagonal subplot.\n\n Parameters\n ----------\n func : callable plotting function\n Must take an x array as a positional argument and draw onto the\n \"currently active\" matplotlib Axes. Also needs to accept kwargs\n called ``color`` and ``label``.\n\n \"\"\"\n # Add special diagonal axes for the univariate plot\n if self.diag_axes is None:\n diag_vars = []\n diag_axes = []\n for i, y_var in enumerate(self.y_vars):\n for j, x_var in enumerate(self.x_vars):\n if x_var == y_var:\n\n # Make the density axes\n diag_vars.append(x_var)\n ax = self.axes[i, j]\n diag_ax = ax.twinx()\n diag_ax.set_axis_off()\n diag_axes.append(diag_ax)\n\n # Work around matplotlib bug\n # https://github.com/matplotlib/matplotlib/issues/15188\n if not plt.rcParams.get(\"ytick.left\", True):\n for tick in ax.yaxis.majorTicks:\n tick.tick1line.set_visible(False)\n\n # Remove main y axis from density axes in a corner plot\n if self._corner:\n ax.yaxis.set_visible(False)\n if self._despine:\n utils.despine(ax=ax, left=True)\n # TODO add optional density ticks (on the right)\n # when drawing a corner plot?\n\n if self.diag_sharey and diag_axes:\n for ax in diag_axes[1:]:\n share_axis(diag_axes[0], ax, \"y\")\n\n self.diag_vars = np.array(diag_vars, np.object_)\n self.diag_axes = np.array(diag_axes, np.object_)\n\n if \"hue\" not in signature(func).parameters:\n return self._map_diag_iter_hue(func, **kwargs)\n\n # Loop over diagonal variables and axes, making one plot in each\n for var, ax in zip(self.diag_vars, self.diag_axes):\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n vector = self.data[var]\n if self._hue_var is not None:\n hue = self.data[self._hue_var]\n else:\n hue = None\n\n if self._dropna:\n not_na = vector.notna()\n if hue is not None:\n not_na &= hue.notna()\n vector = vector[not_na]\n if hue is not None:\n hue = hue[not_na]\n\n plot_kwargs.setdefault(\"hue\", hue)\n plot_kwargs.setdefault(\"hue_order\", self._hue_order)\n plot_kwargs.setdefault(\"palette\", self._orig_palette)\n func(x=vector, **plot_kwargs)\n ax.legend_ = None\n\n self._add_axis_labels()\n return self\n\n def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put 
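`_find_numeric_cols` delegates to seaborn's internal `variable_type`. A rough pandas-only approximation of the same filter (not the exact rule; `variable_type` also special-cases booleans and categoricals):

```python
import pandas as pd
from pandas.api.types import is_numeric_dtype

def find_numeric_cols(data):
    """Approximate stand-in for PairGrid._find_numeric_cols."""
    return [col for col in data if is_numeric_dtype(data[col])]

df = pd.DataFrame({"a": [1.0, 2.0], "b": ["u", "v"]})
print(find_numeric_cols(df))  # ['a']
```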
marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in 
self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1453, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " diag_ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1460, "name": "set_visible", "kind": "ref", "category": "function", "info": " tick.tick1line.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1464, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax.yaxis.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1466, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1472, "name": "share_axis", "kind": "ref", "category": "function", "info": " share_axis(diag_axes[0], ax, \"y\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1478, "name": "_map_diag_iter_hue", "kind": "ref", "category": "function", "info": " return self._map_diag_iter_hue(func, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1506, "name": "func", "kind": "ref", "category": "function", "info": " func(x=vector, **plot_kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1509, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1512, "name": "_map_diag_iter_hue", "kind": "def", "category": "function", "info": " def _map_diag_iter_hue(self, func, **kwargs):\n \"\"\"Put marginal plot on each diagonal axes, iterating over hue.\"\"\"\n # Plot on each of the diagonal axes\n fixed_color = kwargs.pop(\"color\", None)\n\n for var, ax in zip(self.diag_vars, self.diag_axes):\n hue_grouped = self.data[var].groupby(self.hue_vals)\n\n plot_kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n plot_kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n for k, label_k in enumerate(self._hue_order):\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = 
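The `hue_kws` loop in `_plot_bivariate_iter_hue` cycles one keyword value per hue level (`kws[kw] = val_list[k]`). From the public API that looks like the following sketch; the marker choices are arbitrary:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.default_rng(0)
df = pd.DataFrame({
    "a": rng.normal(size=60),
    "b": rng.normal(size=60),
    "group": rng.choice(["x", "y"], size=60),
})

# One marker per hue level; _plot_bivariate_iter_hue indexes the list by k.
g = sns.PairGrid(df, hue="group", hue_kws={"marker": ["o", "s"]})
g.map_offdiag(plt.scatter)
g.map_diag(plt.hist)
g.add_legend()
```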
hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.Series([], dtype=float)\n\n if fixed_color is None:\n color = self.palette[k]\n else:\n color = fixed_color\n\n if self._dropna:\n data_k = utils.remove_na(data_k)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=data_k, label=label_k, color=color, **plot_kwargs)\n else:\n func(data_k, label=label_k, color=color, **plot_kwargs)\n\n self._add_axis_labels()\n\n return self\n\n def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def 
_find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1518, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data[var].groupby(self.hue_vals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1530, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1540, "name": "remove_na", "kind": "ref", "category": "function", "info": " data_k = utils.remove_na(data_k)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1543, "name": "func", "kind": "ref", "category": "function", "info": " func(x=data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1545, "name": "func", "kind": "ref", "category": "function", "info": " func(data_k, label=label_k, color=color, **plot_kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1547, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1551, "name": "_map_bivariate", "kind": "def", "category": "function", "info": " def _map_bivariate(self, func, indices, **kwargs):\n \"\"\"Draw a bivariate plot on the indicated axes.\"\"\"\n # This is a hack to handle the fact that new distribution plots don't add\n # their artists onto the axes. This is probably superior in general, but\n # we'll need a better way to handle it in the axisgrid functions.\n from .distributions import histplot, kdeplot\n if func is histplot or func is kdeplot:\n self._extract_legend_handles = True\n\n kws = kwargs.copy() # Use copy as we insert other kwargs\n for i, j in indices:\n x_var = self.x_vars[j]\n y_var = self.y_vars[i]\n ax = self.axes[i, j]\n if ax is None: # i.e. 
we are in corner mode\n continue\n self._plot_bivariate(x_var, y_var, ax, func, **kws)\n self._add_axis_labels()\n\n if \"hue\" in signature(func).parameters:\n self.hue_names = list(self._legend_data)\n\n def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1567, "name": "_plot_bivariate", "kind": "ref", "category": "function", "info": " self._plot_bivariate(x_var, y_var, ax, func, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1568, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1573, "name": "_plot_bivariate", "kind": "def", "category": "function", "info": " def _plot_bivariate(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot on the specified axes.\"\"\"\n 
if \"hue\" not in signature(func).parameters:\n self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n return\n\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n if self._hue_var is not None and self._hue_var not in axes_vars:\n axes_vars.append(self._hue_var)\n\n data = self.data[axes_vars]\n if self._dropna:\n data = data.dropna()\n\n x = data[x_var]\n y = data[y_var]\n if self._hue_var is None:\n hue = None\n else:\n hue = data.get(self._hue_var)\n\n if \"hue\" not in kwargs:\n kwargs.update({\n \"hue\": hue, \"hue_order\": self._hue_order, \"palette\": self._orig_palette,\n })\n func(x=x, y=y, **kwargs)\n\n self._update_legend_data(ax)\n\n def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1576, "name": "_plot_bivariate_iter_hue", "kind": "ref", "category": "function", "info": " self._plot_bivariate_iter_hue(x_var, y_var, ax, func, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1595, "name": "dropna", "kind": "ref", "category": "function", "info": " data = data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1608, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1610, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1612, "name": 
"_plot_bivariate_iter_hue", "kind": "def", "category": "function", "info": " def _plot_bivariate_iter_hue(self, x_var, y_var, ax, func, **kwargs):\n \"\"\"Draw a bivariate plot while iterating over hue subsets.\"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = ax\n else:\n plt.sca(ax)\n\n if x_var == y_var:\n axes_vars = [x_var]\n else:\n axes_vars = [x_var, y_var]\n\n hue_grouped = self.data.groupby(self.hue_vals)\n for k, label_k in enumerate(self._hue_order):\n\n kws = kwargs.copy()\n\n # Attempt to get data for this level, allowing for empty\n try:\n data_k = hue_grouped.get_group(label_k)\n except KeyError:\n data_k = pd.DataFrame(columns=axes_vars,\n dtype=float)\n\n if self._dropna:\n data_k = data_k[axes_vars].dropna()\n\n x = data_k[x_var]\n y = data_k[y_var]\n\n for kw, val_list in self.hue_kws.items():\n kws[kw] = val_list[k]\n kws.setdefault(\"color\", self.palette[k])\n if self._hue_var is not None:\n kws[\"label\"] = label_k\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=x, y=y, **kws)\n else:\n func(x, y, **kws)\n\n self._update_legend_data(ax)\n\n def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1625, "name": "groupby", "kind": "ref", "category": "function", "info": " hue_grouped = self.data.groupby(self.hue_vals)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1632, "name": "get_group", "kind": "ref", "category": "function", "info": " data_k = hue_grouped.get_group(label_k)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1638, "name": "dropna", "kind": "ref", "category": "function", "info": " data_k = data_k[axes_vars].dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1650, "name": "func", "kind": "ref", "category": "function", "info": " func(x=x, y=y, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1652, "name": "func", "kind": "ref", "category": "function", "info": " func(x, y, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1654, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1656, "name": "_add_axis_labels", "kind": "def", "category": "function", "info": " def _add_axis_labels(self):\n \"\"\"Add labels to the left and bottom Axes.\"\"\"\n for ax, label in zip(self.axes[-1, :], self.x_vars):\n ax.set_xlabel(label)\n for ax, label in zip(self.axes[:, 0], self.y_vars):\n 
ax.set_ylabel(label)\n\n def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1659, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(label)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1661, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(label)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1663, "name": "_find_numeric_cols", "kind": "def", "category": "function", "info": " def _find_numeric_cols(self, data):\n \"\"\"Find which variables in a DataFrame are numeric.\"\"\"\n numeric_cols = []\n for col in data:\n if variable_type(data[col]) == \"numeric\":\n numeric_cols.append(col)\n return numeric_cols\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1667, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1672, "name": "JointGrid", "kind": "def", "category": "class", "info": "__init__\t_inject_kwargs\tplot\tplot_joint\tplot_marginals\trefline\tset_axis_labels"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1692, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_joint = f.add_subplot(gs[1:, :-1])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1693, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1694, "name": "add_subplot", "kind": "ref", "category": "function", "info": " ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1702, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1703, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1704, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1705, "name": "get_yticklabels", "kind": 
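The `JointGrid` constructor records above (`add_subplot` with `sharex`/`sharey`) describe its figure layout. A plain-matplotlib sketch of the same arrangement, with an illustrative grid size:

```python
import matplotlib.pyplot as plt

f = plt.figure(figsize=(6, 6))
gs = plt.GridSpec(6, 6)                 # grid size is illustrative
ax_joint = f.add_subplot(gs[1:, :-1])   # lower-left block
ax_marg_x = f.add_subplot(gs[0, :-1], sharex=ax_joint)  # top strip
ax_marg_y = f.add_subplot(gs[1:, -1], sharey=ax_joint)  # right strip
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
```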
"ref", "category": "function", "info": " plt.setp(ax_marg_y.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1709, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1710, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.yaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1711, "name": "get_majorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_majorticklines(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1712, "name": "get_minorticklines", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.xaxis.get_minorticklines(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1713, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1714, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1715, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_x.get_yticklabels(minor=True), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1716, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(ax_marg_y.get_xticklabels(minor=True), visible=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1721, "name": "VectorPlotter", "kind": "ref", "category": "function", "info": " p = VectorPlotter(data=data, variables=dict(x=x, y=y, hue=hue))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1726, "name": "dropna", "kind": "ref", "category": "function", "info": " plot_data = plot_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1728, "name": "get_var", "kind": "def", "category": "function", "info": " def get_var(var):\n vector = plot_data.get(var, None)\n if vector is not None:\n vector = vector.rename(p.variables.get(var, None))\n return vector\n\n self.x = get_var(\"x\")\n self.y = get_var(\"y\")\n self.hue = get_var(\"hue\")\n\n for axis in \"xy\":\n name = p.variables.get(axis, None)\n if name is not None:\n getattr(ax_joint, f\"set_{axis}label\")(name)\n\n if xlim is not None:\n ax_joint.set_xlim(xlim)\n if ylim is not None:\n 
ax_joint.set_ylim(ylim)\n\n # Store the semantic mapping parameters for axes-level functions\n self._hue_params = dict(palette=palette, hue_order=hue_order, hue_norm=hue_norm)\n\n # Make the grid look nice\n utils.despine(f)\n if not marginal_ticks:\n utils.despine(ax=ax_marg_x, left=True)\n utils.despine(ax=ax_marg_y, bottom=True)\n for axes in [ax_marg_x, ax_marg_y]:\n for axis in [axes.xaxis, axes.yaxis]:\n axis.label.set_visible(False)\n f.tight_layout()\n f.subplots_adjust(hspace=space, wspace=space)\n\n def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
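`_inject_kwargs` forwards a semantic parameter only when the target function's signature accepts it. A self-contained demonstration of the same pattern; `draw` is a hypothetical plotting callable:

```python
from inspect import signature

def inject_kwargs(func, kws, params):
    """Same pattern as JointGrid._inject_kwargs."""
    func_params = signature(func).parameters
    for key, val in params.items():
        if key in func_params:
            kws.setdefault(key, val)

def draw(x, palette=None):  # hypothetical plotting callable
    return palette

kws = {}
inject_kwargs(draw, kws, {"palette": "deep", "hue_norm": None})
print(kws)  # {'palette': 'deep'} -- hue_norm is silently skipped
```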
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1731, "name": "rename", "kind": "ref", "category": 
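For non-seaborn marginal plotters, `plot_marginals` picks an orientation keyword based on the callable's signature. matplotlib's `hist`, for example, exposes `orientation`, which is what ends up on the x- and y-marginal axes respectively:

```python
import numpy as np
import matplotlib.pyplot as plt

x = np.random.default_rng(1).normal(size=200)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.hist(x, orientation="vertical")    # what plot_marginals uses on ax_marg_x
ax2.hist(x, orientation="horizontal")  # and on ax_marg_y
```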
"function", "info": " vector = vector.rename(p.variables.get(var, None))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1734, "name": "get_var", "kind": "ref", "category": "function", "info": " self.x = get_var(\"x\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1735, "name": "get_var", "kind": "ref", "category": "function", "info": " self.y = get_var(\"y\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1736, "name": "get_var", "kind": "ref", "category": "function", "info": " self.hue = get_var(\"hue\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1744, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax_joint.set_xlim(xlim)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1746, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax_joint.set_ylim(ylim)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1752, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(f)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1754, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_x, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1755, "name": "despine", "kind": "ref", "category": "function", "info": " utils.despine(ax=ax_marg_y, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1758, "name": "set_visible", "kind": "ref", "category": "function", "info": " axis.label.set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1762, "name": "_inject_kwargs", "kind": "def", "category": "function", "info": " def _inject_kwargs(self, func, kws, params):\n \"\"\"Add params to kws if they are accepted by func.\"\"\"\n func_params = signature(func).parameters\n for key, val in params.items():\n if key in func_params:\n kws.setdefault(key, val)\n\n def plot(self, joint_func, marginal_func, **kwargs):\n \"\"\"Draw the plot by passing functions for joint and marginal axes.\n\n This method passes the ``kwargs`` dictionary to both functions. If you\n need more control, call :meth:`JointGrid.plot_joint` and\n :meth:`JointGrid.plot_marginals` directly with specific parameters.\n\n Parameters\n ----------\n joint_func, marginal_func : callables\n Functions to draw the bivariate and univariate plots. 
See methods\n referenced above for information about the required characteristics\n of these functions.\n kwargs\n Additional keyword arguments are passed to both functions.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.plot_marginals(marginal_func, **kwargs)\n self.plot_joint(joint_func, **kwargs)\n return self\n\n def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1791, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " self.plot_marginals(marginal_func, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1792, "name": "plot_joint", "kind": "ref", "category": "function", "info": " self.plot_joint(joint_func, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1795, "name": "plot_joint", "kind": "def", "category": "function", "info": " def plot_joint(self, func, **kwargs):\n \"\"\"Draw a bivariate plot on the joint axes of the grid.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y``. 
Otherwise,\n it must accept ``x`` and ``y`` vectors of data as the first two\n positional arguments, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, the function must\n accept ``hue`` as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n kwargs = kwargs.copy()\n if str(func.__module__).startswith(\"seaborn\"):\n kwargs[\"ax\"] = self.ax_joint\n else:\n plt.sca(self.ax_joint)\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if str(func.__module__).startswith(\"seaborn\"):\n func(x=self.x, y=self.y, **kwargs)\n else:\n func(self.x, self.y, **kwargs)\n\n return self\n\n def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. 
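A usage sketch for the `plot_joint`/`plot_marginals` methods documented above, with synthetic data:

```python
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(2)
df = pd.DataFrame({"x": rng.normal(size=200), "y": rng.normal(size=200)})

g = sns.JointGrid(data=df, x="x", y="y")
g.plot_joint(sns.kdeplot)        # bivariate plot on the joint axes
g.plot_marginals(sns.histplot)   # univariate plots on both marginals
```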
sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1822, "name": "_inject_kwargs", "kind": "ref", "category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1825, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, y=self.y, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1827, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, self.y, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1831, "name": "plot_marginals", "kind": "def", "category": "function", "info": " def plot_marginals(self, func, **kwargs):\n \"\"\"Draw univariate plots on each marginal axes.\n\n Parameters\n ----------\n func : plotting callable\n If a seaborn function, it should accept ``x`` and ``y`` and plot\n when only one of them is defined. 
Otherwise, it must accept a vector\n of data as the first positional argument and determine its orientation\n using the ``vertical`` parameter, and it must plot on the \"current\" axes.\n If ``hue`` was defined in the class constructor, it must accept ``hue``\n as a parameter.\n kwargs\n Keyword argument are passed to the plotting function.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n seaborn_func = (\n str(func.__module__).startswith(\"seaborn\")\n # deprecated distplot has a legacy API, special case it\n and not func.__name__ == \"distplot\"\n )\n func_params = signature(func).parameters\n kwargs = kwargs.copy()\n if self.hue is not None:\n kwargs[\"hue\"] = self.hue\n self._inject_kwargs(func, kwargs, self._hue_params)\n\n if \"legend\" in func_params:\n kwargs.setdefault(\"legend\", False)\n\n if \"orientation\" in func_params:\n # e.g. plt.hist\n orient_kw_x = {\"orientation\": \"vertical\"}\n orient_kw_y = {\"orientation\": \"horizontal\"}\n elif \"vertical\" in func_params:\n # e.g. sns.distplot (also how did this get backwards?)\n orient_kw_x = {\"vertical\": False}\n orient_kw_y = {\"vertical\": True}\n\n if seaborn_func:\n func(x=self.x, ax=self.ax_marg_x, **kwargs)\n else:\n plt.sca(self.ax_marg_x)\n func(self.x, **orient_kw_x, **kwargs)\n\n if seaborn_func:\n func(y=self.y, ax=self.ax_marg_y, **kwargs)\n else:\n plt.sca(self.ax_marg_y)\n func(self.y, **orient_kw_y, **kwargs)\n\n self.ax_marg_x.yaxis.get_label().set_visible(False)\n self.ax_marg_y.xaxis.get_label().set_visible(False)\n\n return self\n\n def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1861, "name": "_inject_kwargs", "kind": "ref", 
"category": "function", "info": " self._inject_kwargs(func, kwargs, self._hue_params)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1876, "name": "func", "kind": "ref", "category": "function", "info": " func(x=self.x, ax=self.ax_marg_x, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1879, "name": "func", "kind": "ref", "category": "function", "info": " func(self.x, **orient_kw_x, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1882, "name": "func", "kind": "ref", "category": "function", "info": " func(y=self.y, ax=self.ax_marg_y, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1885, "name": "func", "kind": "ref", "category": "function", "info": " func(self.y, **orient_kw_y, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1887, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1887, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_x.yaxis.get_label().set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1888, "name": "get_label", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1888, "name": "set_visible", "kind": "ref", "category": "function", "info": " self.ax_marg_y.xaxis.get_label().set_visible(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1892, "name": "refline", "kind": "def", "category": "function", "info": " def refline(\n self, *, x=None, y=None, joint=True, marginal=True,\n color='.5', linestyle='--', **line_kws\n ):\n \"\"\"Add a reference line(s) to joint and/or marginal axes.\n\n Parameters\n ----------\n x, y : numeric\n Value(s) to draw the line(s) at.\n joint, marginal : bools\n Whether to add the reference line(s) to the joint/marginal axes.\n color : :mod:`matplotlib color `\n Specifies the color of the reference line(s).\n linestyle : str\n Specifies the style of the reference line(s).\n line_kws : key, value mappings\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.axvline`\n when ``x`` is not None and :meth:`matplotlib.axes.Axes.axhline` when ``y``\n is not None.\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n line_kws['color'] = color\n line_kws['linestyle'] = linestyle\n\n if x is not None:\n if joint:\n self.ax_joint.axvline(x, **line_kws)\n if marginal:\n self.ax_marg_x.axvline(x, **line_kws)\n\n if y is not None:\n if joint:\n self.ax_joint.axhline(y, **line_kws)\n if marginal:\n self.ax_marg_y.axhline(y, **line_kws)\n\n return self\n\n def set_axis_labels(self, xlabel=\"\", 
ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1936, "name": "set_axis_labels", "kind": "def", "category": "function", "info": " def set_axis_labels(self, xlabel=\"\", ylabel=\"\", **kwargs):\n \"\"\"Set axis labels on the bivariate axes.\n\n Parameters\n ----------\n xlabel, ylabel : strings\n Label names for the x and y variables.\n kwargs : key, value mappings\n Other keyword arguments are passed to the following functions:\n\n - :meth:`matplotlib.axes.Axes.set_xlabel`\n - :meth:`matplotlib.axes.Axes.set_ylabel`\n\n Returns\n -------\n :class:`JointGrid` instance\n Returns ``self`` for easy method chaining.\n\n \"\"\"\n self.ax_joint.set_xlabel(xlabel, **kwargs)\n self.ax_joint.set_ylabel(ylabel, **kwargs)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1955, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 1956, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " self.ax_joint.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2004, "name": "pairplot", "kind": "def", "category": "function", "info": "def pairplot(\n data, *,\n hue=None, hue_order=None, palette=None,\n vars=None, x_vars=None, y_vars=None,\n kind=\"scatter\", diag_kind=\"auto\", markers=None,\n height=2.5, aspect=1, corner=False, dropna=False,\n plot_kws=None, diag_kws=None, grid_kws=None, size=None,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2113, "name": "PairGrid", "kind": "ref", "category": "function", "info": " grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2143, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(histplot, **diag_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2147, "name": "map_diag", "kind": "ref", "category": "function", "info": " grid.map_diag(kdeplot, **diag_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2157, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(scatterplot, **plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", 
"line": 2160, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(regplot, **plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2164, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(kdeplot, **plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2167, "name": "plotter", "kind": "ref", "category": "function", "info": " plotter(histplot, **plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2171, "name": "add_legend", "kind": "ref", "category": "function", "info": " grid.add_legend()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2178, "name": "jointplot", "kind": "def", "category": "function", "info": "def jointplot(\n data=None, *, x=None, y=None, hue=None, kind=\"scatter\",\n height=6, ratio=5, space=.2, dropna=False, xlim=None, ylim=None,\n color=None, palette=None, hue_order=None, hue_norm=None, marginal_ticks=False,\n joint_kws=None, marginal_kws=None,\n **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2217, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", plot_kinds, kind)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2230, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color_rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2231, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " colors = [utils.set_hls_values(color_rgb, l=l) # noqa\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2233, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(colors, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2240, "name": "JointGrid", "kind": "ref", "category": "function", "info": " grid = JointGrid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2254, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(scatterplot, **joint_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2264, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(marg_func, **marginal_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2272, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(histplot, **joint_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2287, "name": "histplot", "kind": 
"ref", "category": "function", "info": " histplot(data=data, x=x, hue=hue, **marg_x_kws, ax=grid.ax_marg_x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2288, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(data=data, y=y, hue=hue, **marg_y_kws, ax=grid.ax_marg_y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2294, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(kdeplot, **joint_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2300, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(kdeplot, **marginal_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2304, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " x_bins = min(_freedman_diaconis_bins(grid.x), 50)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2305, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " y_bins = min(_freedman_diaconis_bins(grid.y), 50)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2310, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(plt.hexbin, **joint_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2314, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2320, "name": "plot_marginals", "kind": "ref", "category": "function", "info": " grid.plot_marginals(histplot, **marginal_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2323, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(regplot, **joint_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2328, "name": "plot_joint", "kind": "ref", "category": "function", "info": " grid.plot_joint(residplot, **joint_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2330, "name": "get_offsets", "kind": "ref", "category": "function", "info": " x, y = grid.ax_joint.collections[0].get_offsets().T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2332, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(x=x, hue=hue, ax=grid.ax_marg_x, **marginal_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/axisgrid.py", "rel_fname": "seaborn/axisgrid.py", "line": 2333, "name": "histplot", "kind": "ref", "category": "function", "info": " histplot(y=y, hue=hue, ax=grid.ax_marg_y, 
**marginal_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 49, "name": "_CategoricalPlotterNew", "kind": "def", "category": "class", "info": "__init__\t_hue_backcompat\t_palette_without_hue_backcompat\t_point_kwargs_backcompat\t_err_kws_backcompat\t_scale_backcompat\t_get_gray\t_map_prop_with_hue\t_adjust_cat_axis\t_dodge_needed\t_dodge\t_invert_scale\t_configure_legend\t_native_width\t_nested_offsets\tplot_strips\tplot_swarms\tplot_boxes\tplot_violins\tplot_points\tplot_bars\tplot_errorbars"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 81, "name": "rename", "kind": "ref", "category": "function", "info": " self.plot_data = self.plot_data.rename(columns={\"x\": \"y\", \"y\": \"x\"})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 99, "name": "infer_orient", "kind": "ref", "category": "function", "info": " self.orient = infer_orient(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 121, "name": "categorical_order", "kind": "ref", "category": "function", "info": " cat_levels = categorical_order(self.plot_data[self.orient], order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 124, "name": "_hue_backcompat", "kind": "def", "category": "function", "info": " def _hue_backcompat(self, color, palette, hue_order, force_hue=False):\n \"\"\"Implement backwards compatibility for hue parametrization.\n\n Note: the force_hue parameter is used so that functions can be shown to\n pass existing tests during refactoring and then tested for new behavior.\n It can be removed after completion of the work.\n\n \"\"\"\n # The original categorical functions applied a palette to the categorical axis\n # by default. We want to require an explicit hue mapping, to be more consistent\n # with how things work elsewhere now. I don't think there's any good way to\n # do this gently -- because it's triggered by the default value of hue=None,\n # users would always get a warning, unless we introduce some sentinel \"default\"\n # argument for this change. 
That's possible, but asking users to set `hue=None`\n # on every call is annoying.\n # We are keeping the logic for implementing the old behavior in with the current\n # system so that (a) we can punt on that decision and (b) we can ensure that\n # refactored code passes old tests.\n default_behavior = color is None or palette is not None\n if force_hue and \"hue\" not in self.variables and default_behavior:\n self._redundant_hue = True\n self.plot_data[\"hue\"] = self.plot_data[self.orient]\n self.variables[\"hue\"] = self.variables[self.orient]\n self.var_types[\"hue\"] = \"categorical\"\n hue_order = self.var_levels[self.orient]\n\n # Because we convert the categorical axis variable to string,\n # we need to update a dictionary palette too\n if isinstance(palette, dict):\n palette = {str(k): v for k, v in palette.items()}\n\n else:\n if \"hue\" in self.variables:\n redundant = (self.plot_data[\"hue\"] == self.plot_data[self.orient]).all()\n else:\n redundant = False\n self._redundant_hue = redundant\n\n # Previously, categorical plots had a trick where color= could seed the palette.\n # Because that's an explicit parameterization, we are going to give it one\n # release cycle with a warning before removing.\n if \"hue\" in self.variables and palette is None and color is not None:\n if not isinstance(color, str):\n color = mpl.colors.to_hex(color)\n palette = f\"dark:{color}\"\n msg = (\n \"\\n\\nSetting a gradient palette using color= is deprecated and will be \"\n f\"removed in v0.14.0. Set `palette='{palette}'` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return palette, hue_order\n\n def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = (\n \"\\n\\nPassing `palette` without assigning `hue` is deprecated \"\n f\"and will be removed in v0.14.0. Assign the `{self.orient}` variable \"\n \"to `hue` and set `legend=False` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.orient]\n self.variables[\"hue\"] = self.variables.get(self.orient)\n self.var_types[\"hue\"] = self.var_types.get(self.orient)\n\n hue_order = self.var_levels.get(self.orient)\n self._var_levels.pop(\"hue\", None)\n\n return hue_order\n\n def _point_kwargs_backcompat(self, scale, join, kwargs):\n \"\"\"Provide two cycles where scale= and join= work, but redirect to kwargs.\"\"\"\n if scale is not deprecated:\n lw = mpl.rcParams[\"lines.linewidth\"] * 1.8 * scale\n mew = lw * .75\n ms = lw * 2\n\n msg = (\n \"\\n\\n\"\n \"The `scale` parameter is deprecated and will be removed in v0.15.0. 
\"\n \"You can now control the size of each plot element using matplotlib \"\n \"`Line2D` parameters (e.g., `linewidth`, `markersize`, etc.).\"\n \"\\n\"\n )\n warnings.warn(msg, stacklevel=3)\n kwargs.update(linewidth=lw, markeredgewidth=mew, markersize=ms)\n\n if join is not deprecated:\n msg = (\n \"\\n\\n\"\n \"The `join` parameter is deprecated and will be removed in v0.15.0.\"\n )\n if not join:\n msg += (\n \" You can remove the line between points with `linestyle='none'`.\"\n )\n kwargs.update(linestyle=\"\")\n msg += \"\\n\"\n warnings.warn(msg, stacklevel=3)\n\n def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):\n \"\"\"Provide two cycles where existing signature-level err_kws are handled.\"\"\"\n def deprecate_err_param(name, key, val):\n if val is deprecated:\n return\n suggest = f\"err_kws={{'{key}': {val!r}}}\"\n msg = (\n f\"\\n\\nThe `{name}` parameter is deprecated and will be removed \"\n f\"in v0.15.0. Pass `{suggest}` instead.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n err_kws[key] = val\n\n if errcolor is not None:\n deprecate_err_param(\"errcolor\", \"color\", errcolor)\n deprecate_err_param(\"errwidth\", \"linewidth\", errwidth)\n\n if capsize is None:\n capsize = 0\n msg = (\n \"\\n\\nPassing `capsize=None` is deprecated and will be removed \"\n \"in v0.15.0. Pass `capsize=0` to disable caps.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return err_kws, capsize\n\n def _scale_backcompat(self, scale, scale_hue, density_norm, common_norm):\n \"\"\"Provide two cycles of backcompat for scale kwargs\"\"\"\n if scale is not deprecated:\n density_norm = scale\n msg = (\n \"\\n\\nThe `scale` parameter has been renamed and will be removed \"\n f\"in v0.15.0. Pass `density_norm={scale!r}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n if scale_hue is not deprecated:\n common_norm = scale_hue\n msg = (\n \"\\n\\nThe `scale_hue` parameter has been replaced and will be removed \"\n f\"in v0.15.0. 
Pass `common_norm={not scale_hue}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return density_norm, common_norm\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n colors = [mpl.colors.to_rgb(c) for c in colors]\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n \"\"\"Support pointplot behavior of modifying the marker/linestyle with hue.\"\"\"\n if value is default:\n value = plot_kws.pop(name, fallback)\n\n if (levels := self._hue_map.levels) is None:\n mapping = {None: value}\n else:\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n mapping = {k: value for k in levels}\n\n return mapping\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n def _dodge_needed(self):\n \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n if \"hue\" in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n return orient.size != paired.size\n return False\n\n def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is 
not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leaves\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance)\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 167, "name": "to_hex", "kind": "ref", "category": "function", "info": " color = mpl.colors.to_hex(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 177, "name": "_palette_without_hue_backcompat", "kind": "def", "category": "function", "info": " def _palette_without_hue_backcompat(self, palette, hue_order):\n \"\"\"Provide one cycle where palette= implies hue= when not provided\"\"\"\n if \"hue\" not in self.variables and palette is not None:\n msg = (\n \"\\n\\nPassing `palette` without assigning `hue` is deprecated \"\n f\"and will be removed in v0.14.0. 
Assign the `{self.orient}` variable \"\n \"to `hue` and set `legend=False` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n self.legend = False\n self.plot_data[\"hue\"] = self.plot_data[self.orient]\n self.variables[\"hue\"] = self.variables.get(self.orient)\n self.var_types[\"hue\"] = self.var_types.get(self.orient)\n\n hue_order = self.var_levels.get(self.orient)\n self._var_levels.pop(\"hue\", None)\n\n return hue_order\n\n def _point_kwargs_backcompat(self, scale, join, kwargs):\n \"\"\"Provide two cycles where scale= and join= work, but redirect to kwargs.\"\"\"\n if scale is not deprecated:\n lw = mpl.rcParams[\"lines.linewidth\"] * 1.8 * scale\n mew = lw * .75\n ms = lw * 2\n\n msg = (\n \"\\n\\n\"\n \"The `scale` parameter is deprecated and will be removed in v0.15.0. \"\n \"You can now control the size of each plot element using matplotlib \"\n \"`Line2D` parameters (e.g., `linewidth`, `markersize`, etc.).\"\n \"\\n\"\n )\n warnings.warn(msg, stacklevel=3)\n kwargs.update(linewidth=lw, markeredgewidth=mew, markersize=ms)\n\n if join is not deprecated:\n msg = (\n \"\\n\\n\"\n \"The `join` parameter is deprecated and will be removed in v0.15.0.\"\n )\n if not join:\n msg += (\n \" You can remove the line between points with `linestyle='none'`.\"\n )\n kwargs.update(linestyle=\"\")\n msg += \"\\n\"\n warnings.warn(msg, stacklevel=3)\n\n def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):\n \"\"\"Provide two cycles where existing signature-level err_kws are handled.\"\"\"\n def deprecate_err_param(name, key, val):\n if val is deprecated:\n return\n suggest = f\"err_kws={{'{key}': {val!r}}}\"\n msg = (\n f\"\\n\\nThe `{name}` parameter is deprecated and will be removed \"\n f\"in v0.15.0. Pass `{suggest}` instead.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n err_kws[key] = val\n\n if errcolor is not None:\n deprecate_err_param(\"errcolor\", \"color\", errcolor)\n deprecate_err_param(\"errwidth\", \"linewidth\", errwidth)\n\n if capsize is None:\n capsize = 0\n msg = (\n \"\\n\\nPassing `capsize=None` is deprecated and will be removed \"\n \"in v0.15.0. Pass `capsize=0` to disable caps.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return err_kws, capsize\n\n def _scale_backcompat(self, scale, scale_hue, density_norm, common_norm):\n \"\"\"Provide two cycles of backcompat for scale kwargs\"\"\"\n if scale is not deprecated:\n density_norm = scale\n msg = (\n \"\\n\\nThe `scale` parameter has been renamed and will be removed \"\n f\"in v0.15.0. Pass `density_norm={scale!r}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n if scale_hue is not deprecated:\n common_norm = scale_hue\n msg = (\n \"\\n\\nThe `scale_hue` parameter has been replaced and will be removed \"\n f\"in v0.15.0. 
Pass `common_norm={not scale_hue}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return density_norm, common_norm\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n colors = [mpl.colors.to_rgb(c) for c in colors]\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n \"\"\"Support pointplot behavior of modifying the marker/linestyle with hue.\"\"\"\n if value is default:\n value = plot_kws.pop(name, fallback)\n\n if (levels := self._hue_map.levels) is None:\n mapping = {None: value}\n else:\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n mapping = {k: value for k in levels}\n\n return mapping\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n def _dodge_needed(self):\n \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n if \"hue\" in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n return orient.size != paired.size\n return False\n\n def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is 
not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
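For dodged strips and swarms, `_nested_offsets` divides the group width into equal slots, one per hue level, and centers the slot midpoints on the category position; the jitter and dodge moves in `plot_strips` are then added on top of these offsets. A small sketch of just that computation, under the same definitions (the standalone name `nested_offsets` is hypothetical):

    import numpy as np

    def nested_offsets(width, n_levels, dodge=True):
        # Split the group width into equal slots and center the slot
        # midpoints, so the offsets sum to zero around the category.
        if not dodge:
            return np.zeros(n_levels)
        each_width = width / n_levels
        offsets = np.linspace(0, width - each_width, n_levels)
        return offsets - offsets.mean()

    nested_offsets(.8, 4)  # array([-0.3, -0.1,  0.1,  0.3])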
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 197, "name": "_point_kwargs_backcompat", "kind": "def", "category": "function", "info": " def _point_kwargs_backcompat(self, scale, join, kwargs):\n \"\"\"Provide two cycles where scale= and join= work, but redirect to kwargs.\"\"\"\n if scale is not deprecated:\n lw = mpl.rcParams[\"lines.linewidth\"] * 1.8 * scale\n mew = lw * .75\n ms = lw * 2\n\n msg = (\n \"\\n\\n\"\n \"The `scale` parameter is deprecated and will be removed in v0.15.0. 
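`plot_errorbars`, shown above, places caps a fixed half-width from the error-bar position in data space, except on a log-scaled orient axis, where the half-width is applied in log10 space so the caps remain visually symmetric. A sketch of just that branch, assuming a hypothetical standalone `cap_positions`:

    import numpy as np

    def cap_positions(pos, capsize, native_width, log_scaled=False):
        # Fixed half-width in data space, or in log10 space when the
        # orient axis is log-scaled, matching the logic in plot_errorbars.
        cw = capsize * native_width / 2
        if log_scaled:
            return 10 ** (np.log10(pos) - cw), 10 ** (np.log10(pos) + cw)
        return pos - cw, pos + cw

    cap_positions(100, .2, 1, log_scaled=True)  # ~ (79.43, 125.89)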
\"\n \"You can now control the size of each plot element using matplotlib \"\n \"`Line2D` parameters (e.g., `linewidth`, `markersize`, etc.).\"\n \"\\n\"\n )\n warnings.warn(msg, stacklevel=3)\n kwargs.update(linewidth=lw, markeredgewidth=mew, markersize=ms)\n\n if join is not deprecated:\n msg = (\n \"\\n\\n\"\n \"The `join` parameter is deprecated and will be removed in v0.15.0.\"\n )\n if not join:\n msg += (\n \" You can remove the line between points with `linestyle='none'`.\"\n )\n kwargs.update(linestyle=\"\")\n msg += \"\\n\"\n warnings.warn(msg, stacklevel=3)\n\n def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):\n \"\"\"Provide two cycles where existing signature-level err_kws are handled.\"\"\"\n def deprecate_err_param(name, key, val):\n if val is deprecated:\n return\n suggest = f\"err_kws={{'{key}': {val!r}}}\"\n msg = (\n f\"\\n\\nThe `{name}` parameter is deprecated. And will be removed \"\n f\"in v0.15.0. Pass `{suggest}` instead.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n err_kws[key] = val\n\n if errcolor is not None:\n deprecate_err_param(\"errcolor\", \"color\", errcolor)\n deprecate_err_param(\"errwidth\", \"linewidth\", errwidth)\n\n if capsize is None:\n capsize = 0\n msg = (\n \"\\n\\nPassing `capsize=None` is deprecated and will be removed \"\n \"in v0.15.0. Pass `capsize=0` to disable caps.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return err_kws, capsize\n\n def _scale_backcompat(self, scale, scale_hue, density_norm, common_norm):\n \"\"\"Provide two cycles of backcompat for scale kwargs\"\"\"\n if scale is not deprecated:\n density_norm = scale\n msg = (\n \"\\n\\nThe `scale` parameter has been renamed and will be removed \"\n f\"in v0.15.0. Pass `density_norm={scale!r}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n if scale_hue is not deprecated:\n common_norm = scale_hue\n msg = (\n \"\\n\\nThe `scale_hue` parameter has been replaced and will be removed \"\n f\"in v0.15.0. 
Pass `common_norm={not scale_hue}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return density_norm, common_norm\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n colors = [mpl.colors.to_rgb(c) for c in colors]\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n \"\"\"Support pointplot behavior of modifying the marker/linestyle with hue.\"\"\"\n if value is default:\n value = plot_kws.pop(name, fallback)\n\n if (levels := self._hue_map.levels) is None:\n mapping = {None: value}\n else:\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n mapping = {k: value for k in levels}\n\n return mapping\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n def _dodge_needed(self):\n \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n if \"hue\" in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n return orient.size != paired.size\n return False\n\n def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is 
not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 227, "name": "_err_kws_backcompat", "kind": "def", "category": "function", "info": " def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):\n \"\"\"Provide two cycles where existing signature-level err_kws are handled.\"\"\"\n def deprecate_err_param(name, key, val):\n if val is deprecated:\n return\n suggest = f\"err_kws={{'{key}': {val!r}}}\"\n msg = (\n f\"\\n\\nThe `{name}` parameter is deprecated. And will be removed \"\n f\"in v0.15.0. 
Pass `{suggest}` instead.\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n err_kws[key] = val\n\n if errcolor is not None:\n deprecate_err_param(\"errcolor\", \"color\", errcolor)\n deprecate_err_param(\"errwidth\", \"linewidth\", errwidth)\n\n if capsize is None:\n capsize = 0\n msg = (\n \"\n\nPassing `capsize=None` is deprecated and will be removed \"\n \"in v0.15.0. Pass `capsize=0` to disable caps.\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return err_kws, capsize\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 229, "name": "deprecate_err_param", "kind": "def", "category": "function", "info": " def deprecate_err_param(name, key, val):\n if val is deprecated:\n return\n suggest = f\"err_kws={{'{key}': {val!r}}}\"\n msg = (\n f\"\n\nThe `{name}` parameter is deprecated. And will be removed \"\n f\"in v0.15.0. Pass `{suggest}` instead.\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=4)\n err_kws[key] = val\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 241, "name": "deprecate_err_param", "kind": "ref", "category": "function", "info": " deprecate_err_param(\"errcolor\", \"color\", errcolor)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 242, "name": "deprecate_err_param", "kind": "ref", "category": "function", "info": " deprecate_err_param(\"errwidth\", \"linewidth\", errwidth)\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 254, "name": "_scale_backcompat", "kind": "def", "category": "function", "info": " def _scale_backcompat(self, scale, scale_hue, density_norm, common_norm):\n \"\"\"Provide two cycles of backcompat for scale kwargs\"\"\"\n if scale 
is not deprecated:\n density_norm = scale\n msg = (\n \"\\n\\nThe `scale` parameter has been renamed and will be removed \"\n f\"in v0.15.0. Pass `density_norm={scale!r}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n if scale_hue is not deprecated:\n common_norm = scale_hue\n msg = (\n \"\\n\\nThe `scale_hue` parameter has been replaced and will be removed \"\n f\"in v0.15.0. Pass `common_norm={not scale_hue}` for the same effect.\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return density_norm, common_norm\n\n def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n colors = [mpl.colors.to_rgb(c) for c in colors]\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n \"\"\"Support pointplot behavior of modifying the marker/linestyle with hue.\"\"\"\n if value is default:\n value = plot_kws.pop(name, fallback)\n\n if (levels := self._hue_map.levels) is None:\n mapping = {None: value}\n else:\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n mapping = {k: value for k in levels}\n\n return mapping\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. 
But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n def _dodge_needed(self):\n \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n if \"hue\" in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n return orient.size != paired.size\n return False\n\n def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is 
not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 274, "name": "_get_gray", "kind": "def", "category": "function", "info": " def _get_gray(self, colors):\n \"\"\"Get a grayscale value that looks good with color.\"\"\"\n if not len(colors):\n return None\n colors = [mpl.colors.to_rgb(c) for c in colors]\n unique_colors = np.unique(colors, axis=0)\n light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]\n lum = min(light_vals) * .6\n return (lum, lum, lum)\n\n def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n \"\"\"Support pointplot behavior of modifying the marker/linestyle with hue.\"\"\"\n if value is default:\n value = plot_kws.pop(name, fallback)\n\n if (levels := self._hue_map.levels) is None:\n mapping = {None: value}\n else:\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n 
mapping = {k: value for k in levels}\n\n return mapping\n\n def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n def _dodge_needed(self):\n \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n if \"hue\" in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n return orient.size != paired.size\n return False\n\n def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width 
= 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n 
beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n 
prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = 
self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 278, "name": "to_rgb", "kind": "ref", "category": "function", "info": " colors = [mpl.colors.to_rgb(c) for c in colors]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 284, "name": "_map_prop_with_hue", "kind": "def", "category": "function", "info": " def _map_prop_with_hue(self, name, value, fallback, plot_kws):\n \"\"\"Support pointplot behavior of modifying the marker/linestyle with hue.\"\"\"\n if value is default:\n value = plot_kws.pop(name, fallback)\n\n if (levels := self._hue_map.levels) is None:\n mapping = {None: value}\n else:\n if isinstance(value, list):\n mapping = {k: v for k, v in zip(levels, value)}\n else:\n mapping = {k: value for k in levels}\n\n return mapping\n\n def 
_adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of categories (including those from previous\n # plots that are not part of the plot we are currently making) from the number\n # of ticks, which matplotlib sets up while doing unit conversion. This feels\n # slightly risky, as if we are relying on something that may be a matplotlib\n # implementation detail. But I cannot think of a better way to keep track of\n # the state from previous categorical calls (see GH2516 for context)\n n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n if axis == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, n - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n # Note limits that correspond to previously-inverted y axis\n ax.set_ylim(n - .5, -.5, auto=None)\n\n def _dodge_needed(self):\n \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n if \"hue\" in self.variables:\n orient = self.plot_data[groupers].value_counts()\n paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n return orient.size != paired.size\n return False\n\n def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, 
dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = 
False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n 
bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n 
linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 299, "name": "_adjust_cat_axis", "kind": "def", "category": "function", "info": " def _adjust_cat_axis(self, ax, axis):\n \"\"\"Set ticks and limits for a categorical variable.\"\"\"\n # Note: in theory, this could happen in _attach for all categorical axes\n # But two reasons not to do that:\n # - If it happens before plotting, autoscaling messes up the plot limits\n # - It would change existing plots from other seaborn functions\n if self.var_types[axis] != \"categorical\":\n return\n\n # If both x/y data are empty, the correct way to set up the plot is\n # somewhat undefined; because we don't add null category data to the plot in\n # this case we don't *have* a categorical axis (yet), so best to just bail.\n if self.plot_data[axis].empty:\n return\n\n # We can infer the total number of 
categories (including those from previous\n        # plots that are not part of the plot we are currently making) from the number\n        # of ticks, which matplotlib sets up while doing unit conversion. This feels\n        # slightly risky, as if we are relying on something that may be a matplotlib\n        # implementation detail. But I cannot think of a better way to keep track of\n        # the state from previous categorical calls (see GH2516 for context)\n        n = len(getattr(ax, f\"get_{axis}ticks\")())\n\n        if axis == \"x\":\n            ax.xaxis.grid(False)\n            ax.set_xlim(-.5, n - .5, auto=None)\n        else:\n            ax.yaxis.grid(False)\n            # Note limits that correspond to previously-inverted y axis\n            ax.set_ylim(n - .5, -.5, auto=None)\n\n    # [elided: the remainder of this info field is a verbatim duplicate of the\n    # method bodies recorded above, from _dodge_needed through plot_errorbars]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 324, "name": "set_xlim", "kind": "ref", "category": "function", "info": "            ax.set_xlim(-.5, n - .5, auto=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 328, "name": "set_ylim", "kind": "ref", "category": "function", "info": "            ax.set_ylim(n - .5, -.5, auto=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 330, "name": "_dodge_needed", "kind": "def", "category": "function", "info": "    def _dodge_needed(self):\n        \"\"\"Return True when use of `hue` would cause overlaps.\"\"\"\n        groupers = list({self.orient, \"col\", \"row\"} & set(self.variables))\n        if \"hue\" in self.variables:\n            orient = self.plot_data[groupers].value_counts()\n            paired = self.plot_data[[*groupers, \"hue\"]].value_counts()\n            return orient.size != paired.size\n        return False\n\n    # [elided: another verbatim duplicate of the method bodies recorded above;\n    # the retained text below resumes inside plot_violins, within the\n    # violin_data.append() call]\n                \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance)\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 339, "name": "_dodge", "kind": "def", "category": "function", "info": " def _dodge(self, keys, data):\n \"\"\"Apply a dodge transform to coordinates in place.\"\"\"\n hue_idx = self._hue_map.levels.index(keys[\"hue\"])\n n = len(self._hue_map.levels)\n data[\"width\"] /= n\n\n full_width = data[\"width\"] * n\n offset = data[\"width\"] * hue_idx + data[\"width\"] / 2 - full_width / 2\n data[self.orient] += offset\n\n def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - 
data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in 
self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n 
capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", 
\"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 349, "name": "_invert_scale", "kind": "def", "category": "function", "info": " def _invert_scale(self, ax, data, vars=(\"x\", \"y\")):\n \"\"\"Undo scaling after computation so data are plotted correctly.\"\"\"\n for var in vars:\n _, inv = utils._get_transform_functions(ax, var[0])\n if var == self.orient and \"width\" in data:\n hw = data[\"width\"] / 2\n data[\"edge\"] = inv(data[var] - hw)\n data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n for suf in [\"\", \"min\", \"max\"]:\n if (col := f\"{var}{suf}\") in data:\n data[col] = inv(data[col])\n\n def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if 
show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], 
sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, 
**props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner 
is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 352, "name": "_get_transform_functions", "kind": "ref", "category": "function", "info": " _, inv = utils._get_transform_functions(ax, var[0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 355, "name": "inv", "kind": "ref", "category": "function", "info": " data[\"edge\"] = inv(data[var] - hw)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 356, "name": "inv", "kind": "ref", "category": "function", "info": " data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 356, "name": "to_numpy", "kind": "ref", "category": "function", "info": " data[\"width\"] = inv(data[var] + hw) - data[\"edge\"].to_numpy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 359, "name": "inv", "kind": "ref", "category": "function", "info": " data[col] = inv(data[col])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 361, "name": "_configure_legend", "kind": "def", "category": "function", "info": " def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):\n\n if self.legend == \"auto\":\n show_legend = not self._redundant_hue and self.input_format != \"wide\"\n else:\n show_legend = bool(self.legend)\n\n if show_legend:\n self.add_legend_data(ax, func, common_kws, semantic_kws)\n handles, _ = ax.get_legend_handles_labels()\n if handles:\n ax.legend(title=self.legend_title)\n\n @property\n def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change 
to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 369, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, func, common_kws, semantic_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 370, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 375, "name": "_native_width", "kind": "def", "category": "function", "info": " def _native_width(self):\n \"\"\"Return unit of width separating categories on native numeric scale.\"\"\"\n # Categorical data always have a unit width\n if 
self.var_types[self.orient] == \"categorical\":\n return 1\n\n # Otherwise, define the width as the smallest space between observations\n unique_values = np.unique(self.comp_data[self.orient])\n if len(unique_values) > 1:\n native_width = np.nanmin(np.diff(unique_values))\n else:\n native_width = 1\n return native_width\n\n def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n 
point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops 
= {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits 
once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 389, "name": "_nested_offsets", "kind": "def", "category": "function", "info": " def _nested_offsets(self, width, dodge):\n \"\"\"Return offsets for each hue level for dodged plots.\"\"\"\n offsets = None\n if \"hue\" in self.variables and self._hue_map.levels is not None:\n n_levels = len(self._hue_map.levels)\n if dodge:\n each_width = width / n_levels\n offsets = np.linspace(0, width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n return offsets\n\n # Note that the plotting methods here aim (in most cases) to produce the\n # exact same artists as the original (pre 0.12) version of the code, so\n # there is some weirdness that might not otherwise be clean or make sense in\n # this context, such as adding empty artists for combinations of 
variables\n # with no observations\n\n def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and (offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
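The swarm code above defers layout to draw time by rebinding each collection's `draw` with `draw.__get__(points)`; `__get__` turns a plain function into a method bound to that single instance. A minimal sketch of the descriptor trick in isolation; the `Widget` class is a made-up stand-in, not seaborn or matplotlib API:

class Widget:
    def draw(self):
        return "original draw"

w = Widget()

def patched_draw(self):
    # Do extra work first, then fall through to the class implementation,
    # mirroring how the swarm wrapper ends with super().draw(renderer).
    print("adjusting layout before drawing")
    return Widget.draw(self)

w.draw = patched_draw.__get__(w)   # bind to this one instance only
print(w.draw())                    # patched: prints, then "original draw"
print(Widget().draw())             # other instances keep the original

Binding per instance (rather than patching the class) leaves every other collection's draw path untouched.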
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
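`plot_boxes` passes `capwidths` to `Axes.bxp` only on matplotlib >= 3.6.0 by splatting a conditional dict into the keyword arguments. A sketch of that pattern; the `_version_predates` helper is approximated here with `packaging.version`, which is an assumption about its behavior:

import matplotlib as mpl
from packaging.version import Version

def version_predates(module, version):
    """True if the installed module is older than `version`."""
    return Version(module.__version__) < Version(version)

capwidth = [0.25, 0.25]
bxp_kws = dict(
    patch_artist=True,
    manage_ticks=False,
    # Splat in kwargs that only newer matplotlib understands:
    **({} if version_predates(mpl, "3.6.0") else {"capwidths": capwidth}),
)
print(bxp_kws)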
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
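The three `density_norm` modes in the violin code above scale each KDE curve differently: "area" divides by the largest peak density in the normalization group, "count" additionally shrinks violins with fewer observations, and "width" lets every violin fill its slot. A condensed sketch of that branch outside the plotting loop; parameter names like `max_density_in_group` are illustrative:

import numpy as np

def violin_span(density, n_obs, half_width, density_norm="area",
                max_density_in_group=None, max_count_in_group=None,
                split=False):
    """Scale a KDE density curve into plot units, per density_norm."""
    peak = density.max()
    if density_norm == "area":
        span = density / max_density_in_group   # comparable areas
    elif density_norm == "count":
        span = density / peak * (n_obs / max_count_in_group)
    elif density_norm == "width":
        span = density / peak                   # every violin spans full width
    else:
        raise ValueError(density_norm)
    return span * half_width * (2 if split else 1)

density = np.array([0.1, 0.4, 0.2])
print(violin_span(density, n_obs=30, half_width=0.4,
                  max_density_in_group=0.4, max_count_in_group=60))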
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
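For split violins, alternating hue levels keep one edge at a fixed half-width while the density shapes the other edge, mirroring the `(hw, span - hw)` / `(span - hw, hw)` selection above. A sketch of that offset choice with a scalar standing in for the per-point density curve; the function name is illustrative:

def split_offsets(span, half_width, right_side, split):
    """Return (left, right) extents of the violin body around its position."""
    if not split:
        return span, span                       # symmetric body
    if right_side:
        # fixed extent on the left, density-modulated extent on the right
        return half_width, span - half_width
    return span - half_width, half_width        # mirror image on the left

# Even-indexed hue levels take one side, odd-indexed the other:
for hue_idx in (0, 1):
    print(hue_idx, split_offsets(span=0.8, half_width=0.4,
                                 right_side=hue_idx % 2, split=True))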
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
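`plot_points` above converts `dodge=True` into a small numeric step (2.5% of the native width per hue level) and spreads the levels symmetrically about the category position. The arithmetic in isolation; the three-level demo is an assumption for illustration:

def point_dodge_offset(dodge, hue_idx, n_hue_levels, native_width=1.0):
    """Offset of one hue level's points along the categorical axis."""
    if dodge is True:
        dodge = .025 * n_hue_levels
    # Spread the levels evenly and center the spread on the category
    offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx
    return offset * native_width

for i in range(3):
    print(i, point_dodge_offset(True, i, 3))   # -> -0.075, 0.0, +0.075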
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 408, "name": "plot_strips", "kind": "def", "category": "function", "info": " def plot_strips(\n self,\n jitter,\n dodge,\n color,\n edgecolor,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n if jitter is True:\n jlim = 0.1\n else:\n jlim = float(jitter)\n if \"hue\" in self.variables and dodge and self._hue_map.levels is not None:\n jlim /= len(self._hue_map.levels)\n jlim *= self._native_width\n jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n dodge_move = jitter_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None and 
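`plot_errorbars` computes cap extents in log10 space when the orient axis is log-scaled, which keeps the on-screen cap length constant instead of letting it shrink near small values. A standalone sketch of the cap arithmetic; `capsize` is in the same native-width units used throughout, and the helper name is illustrative:

import numpy as np

def cap_positions(pos, capsize, native_width=1.0, log_scaled=False):
    """Endpoints of an errorbar cap centered on `pos` along the orient axis."""
    cw = capsize * native_width / 2
    if log_scaled:
        # Offset in log10 units, then map back: equal visual length per cap
        log_pos = np.log10(pos)
        return 10 ** (log_pos - cw), 10 ** (log_pos + cw)
    return pos - cw, pos + cw

print(cap_positions(100.0, 0.2, log_scaled=True))    # (~79.4, ~125.9)
print(cap_positions(100.0, 0.2, log_scaled=False))   # (99.9, 100.1)

The remaining entries below repeat this same source context verbatim for the `plot_strips` and `plot_swarms` definitions, so the sketches above are not duplicated there.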
(offsets != 0).any():\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n\n adjusted_data = sub_data[self.orient] + dodge_move + jitter_move\n sub_data[self.orient] = adjusted_data\n self._invert_scale(ax, sub_data)\n\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n self._configure_legend(ax, ax.scatter)\n\n def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 418, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 436, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 440, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 445, "name": 
"jitterer", "kind": "ref", "category": "function", "info": " jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 449, "name": "_invert_scale", "kind": "ref", "category": "function", "info": " self._invert_scale(ax, sub_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 454, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 457, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 461, "name": "_configure_legend", "kind": "ref", "category": "function", "info": " self._configure_legend(ax, ax.scatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 463, "name": "plot_swarms", "kind": "def", "category": "function", "info": " def plot_swarms(\n self,\n dodge,\n color,\n edgecolor,\n warn_thresh,\n plot_kws,\n ):\n\n width = .8 * self._native_width\n offsets = self._nested_offsets(width, dodge)\n\n iter_vars = [self.orient]\n if dodge:\n iter_vars.append(\"hue\")\n\n ax = self.ax\n point_collections = {}\n dodge_move = 0\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n if offsets is not None:\n dodge_move = offsets[sub_data[\"hue\"].map(self._hue_map.levels.index)]\n\n if not sub_data.empty:\n sub_data[self.orient] = sub_data[self.orient] + dodge_move\n\n self._invert_scale(ax, sub_data)\n points = ax.scatter(sub_data[\"x\"], sub_data[\"y\"], color=color, **plot_kws)\n\n if \"hue\" in self.variables:\n points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n\n if edgecolor == \"gray\": # XXX TODO change to \"auto\"\n points.set_edgecolors(self._get_gray(points.get_facecolors()))\n else:\n points.set_edgecolors(edgecolor)\n\n if not sub_data.empty:\n point_collections[(ax, sub_data[self.orient].iloc[0])] = points\n\n beeswarm = Beeswarm(\n width=width, orient=self.orient, warn_thresh=warn_thresh,\n )\n for (ax, center), points in point_collections.items():\n if points.get_offsets().shape[0] > 1:\n\n def draw(points, renderer, *, center=center):\n\n beeswarm(points, center)\n\n if self.orient == \"y\":\n scalex = False\n scaley = ax.get_autoscaley_on()\n else:\n scalex = ax.get_autoscalex_on()\n scaley = False\n\n # This prevents us from undoing the nice categorical axis limits\n # set in _adjust_cat_axis, because that method currently leave\n # the autoscale flag in its original setting. 
It may be better\n # to disable autoscaling there to avoid needing to do this.\n fixed_scale = self.var_types[self.orient] == \"categorical\"\n ax.update_datalim(points.get_datalim(ax.transData))\n if not fixed_scale and (scalex or scaley):\n ax.autoscale_view(scalex=scalex, scaley=scaley)\n\n super(points.__class__, points).draw(renderer)\n\n points.draw = draw.__get__(points)\n\n _draw_figure(ax.figure)\n self._configure_legend(ax, ax.scatter)\n\n def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n 
medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": 
stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 473, "name": "_nested_offsets", "kind": "ref", "category": "function", "info": " offsets = self._nested_offsets(width, dodge)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 483, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 487, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 495, "name": 
"_invert_scale", "kind": "ref", "category": "function", "info": " self._invert_scale(ax, sub_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 499, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(sub_data[\"hue\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 502, "name": "_get_gray", "kind": "ref", "category": "function", "info": " points.set_edgecolors(self._get_gray(points.get_facecolors()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 509, "name": "Beeswarm", "kind": "ref", "category": "function", "info": " beeswarm = Beeswarm(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 517, "name": "beeswarm", "kind": "ref", "category": "function", "info": " beeswarm(points, center)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 521, "name": "get_autoscaley_on", "kind": "ref", "category": "function", "info": " scaley = ax.get_autoscaley_on()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 523, "name": "get_autoscalex_on", "kind": "ref", "category": "function", "info": " scalex = ax.get_autoscalex_on()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 531, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(points.get_datalim(ax.transData))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 533, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=scalex, scaley=scaley)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 539, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 540, "name": "_configure_legend", "kind": "ref", "category": "function", "info": " self._configure_legend(ax, ax.scatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 542, "name": "plot_boxes", "kind": "def", "category": "function", "info": " def plot_boxes(\n self,\n width,\n dodge,\n gap,\n fill,\n whis,\n color,\n linecolor,\n linewidth,\n fliersize,\n plot_kws, # TODO rename user_kws?\n ):\n\n iter_vars = [\"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = 
mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n 
ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * 
self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = 
mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n [stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data 
if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, 
[np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 561, "name": "_get_gray", "kind": "ref", "category": "function", "info": " linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 563, "name": "_get_gray", "kind": "ref", "category": "function", "info": " linecolor = self._get_gray([color])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 565, "name": "get_props", "kind": "def", "category": "function", "info": " def get_props(element, artist=mpl.lines.Line2D):\n return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n\n if not fill and linewidth is None:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n plot_kws.setdefault(\"shownotches\", plot_kws.pop(\"notch\", False))\n\n box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D\n props = {\n \"box\": get_props(\"box\", box_artist),\n \"median\": get_props(\"median\"),\n \"whisker\": get_props(\"whisker\"),\n \"flier\": get_props(\"flier\"),\n \"cap\": get_props(\"cap\"),\n }\n\n props[\"median\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"whisker\"].setdefault(\"solid_capstyle\", \"butt\")\n props[\"flier\"].setdefault(\"markersize\", fliersize)\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n ax = self._get_axes(sub_vars)\n\n grouped = sub_data.groupby(self.orient)[value_var]\n value_data = [x.to_numpy() for _, x in grouped]\n stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n positions = grouped.grouper.result_index.to_numpy(dtype=float)\n\n orig_width = width * self._native_width\n data = pd.DataFrame({self.orient: positions, \"width\": orig_width})\n if dodge:\n self._dodge(sub_vars, data)\n if gap:\n data[\"width\"] *= 1 - gap\n capwidth = plot_kws.get(\"capwidths\", 0.5 * data[\"width\"])\n\n self._invert_scale(ax, data)\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # TODO how to handle solid / empty fliers?\n\n if fill:\n boxprops = {\n \"facecolor\": maincolor, \"edgecolor\": linecolor, **props[\"box\"]\n }\n medianprops = {\"color\": linecolor, **props[\"median\"]}\n whiskerprops = {\"color\": linecolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": linecolor, **props[\"flier\"]}\n capprops = {\"color\": linecolor, **props[\"cap\"]}\n else:\n boxprops = {\"color\": maincolor, **props[\"box\"]}\n medianprops = {\"color\": maincolor, **props[\"median\"]}\n whiskerprops = {\"color\": maincolor, **props[\"whisker\"]}\n flierprops = {\"markeredgecolor\": maincolor, **props[\"flier\"]}\n capprops = {\"color\": maincolor, **props[\"cap\"]}\n\n if linewidth is not None:\n for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:\n prop_dict.setdefault(\"linewidth\", linewidth)\n\n default_kws = dict(\n bxpstats=stats.to_dict(\"records\"),\n positions=data[self.orient],\n # Set width to 0 with log scaled orient axis to avoid going < 0\n widths=0 if self._log_scaled(self.orient) else data[\"width\"],\n patch_artist=fill,\n vert=self.orient == \"x\",\n manage_ticks=False,\n 
boxprops=boxprops,\n medianprops=medianprops,\n whiskerprops=whiskerprops,\n flierprops=flierprops,\n capprops=capprops,\n # Added in matplotlib 3.6.0; see below\n # capwidths=capwidth,\n **(\n {} if _version_predates(mpl, \"3.6.0\")\n else {\"capwidths\": capwidth}\n )\n )\n boxplot_kws = {**default_kws, **plot_kws}\n artists = ax.bxp(**boxplot_kws)\n\n # Reset artist widths after adding so everything stays positive\n ori_idx = [\"x\", \"y\"].index(self.orient)\n if self._log_scaled(self.orient):\n for i, box in enumerate(data.to_dict(\"records\")):\n p0 = box[\"edge\"]\n p1 = box[\"edge\"] + box[\"width\"]\n\n if artists[\"boxes\"]:\n box_artist = artists[\"boxes\"][i]\n if fill:\n box_verts = box_artist.get_path().vertices.T\n else:\n box_verts = box_artist.get_data()\n box_verts[ori_idx][0] = p0\n box_verts[ori_idx][3:] = p0\n box_verts[ori_idx][1:3] = p1\n if not fill:\n # When fill is True, the data get changed in place\n box_artist.set_data(box_verts)\n # TODO XXX don't update value dimension; don't shrink orient dim\n ax.update_datalim(np.transpose(box_verts))\n\n if artists[\"medians\"]:\n verts = artists[\"medians\"][i].get_xydata().T\n verts[ori_idx][:] = p0, p1\n artists[\"medians\"][i].set_data(verts)\n\n if artists[\"caps\"]:\n for line in artists[\"caps\"][2 * i:2 * i + 2]:\n p0 = 10 ** (np.log10(box[self.orient]) - capwidth[i] / 2)\n p1 = 10 ** (np.log10(box[self.orient]) + capwidth[i] / 2)\n verts = line.get_xydata().T\n verts[ori_idx][:] = p0, p1\n line.set_data(verts)\n\n ax.add_container(BoxPlotContainer(artists))\n\n patch_kws = props[\"box\"].copy()\n if not fill:\n patch_kws[\"facecolor\"] = (1, 1, 1, 0)\n else:\n patch_kws[\"edgecolor\"] = linecolor\n self._configure_legend(ax, ax.fill_between, patch_kws)\n\n def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": 
stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for violin in violin_data:\n\n index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n data = pd.DataFrame({\n self.orient: violin[\"position\"],\n value_var: violin[\"support\"],\n \"density\": violin[\"density\"],\n \"width\": real_width,\n }, index=index)\n\n if dodge:\n self._dodge(violin[\"sub_vars\"], data)\n if gap:\n data[\"width\"] *= 1 - gap\n\n # Normalize the density across the distribution(s) and relative to the width\n norm_key = vars_to_key(violin[\"sub_vars\"])\n hw = data[\"width\"] / 2\n peak_density = violin[\"density\"].max()\n if np.isnan(peak_density):\n span = 1\n elif density_norm == \"area\":\n span = data[\"density\"] / max_density[norm_key]\n elif density_norm == \"count\":\n count = len(violin[\"observations\"])\n span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n elif density_norm == \"width\":\n span = data[\"density\"] / peak_density\n span = span * hw * (2 if split else 1)\n\n # Handle split violins (i.e. 
asymmetric spans)\n right_side = (\n 0 if \"hue\" not in self.variables\n else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n )\n if split:\n offsets = (hw, span - hw) if right_side else (span - hw, hw)\n else:\n offsets = span, span\n\n ax = violin[\"ax\"]\n _, invx = utils._get_transform_functions(ax, \"x\")\n _, invy = utils._get_transform_functions(ax, \"y\")\n inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n # Handle singular datasets (one or more observations with no variance\n if np.isnan(peak_density):\n pos = data[self.orient].iloc[0]\n val = violin[\"observations\"].mean()\n if self.orient == \"x\":\n x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n else:\n x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n continue\n\n # Plot the main violin body\n plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n plot_func(\n inv_val(data[value_var]),\n inv_pos(data[self.orient] - offsets[0]),\n inv_pos(data[self.orient] + offsets[1]),\n **violin[\"kwargs\"]\n )\n\n # Adjust the observation data\n obs = violin[\"observations\"]\n pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n if dodge:\n self._dodge(violin[\"sub_vars\"], pos_dict)\n if gap:\n pos_dict[\"width\"] *= (1 - gap)\n\n # --- Plot the inner components\n if inner is None:\n continue\n\n elif inner.startswith(\"point\"):\n pos = np.array([pos_dict[self.orient]] * len(obs))\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n kws = {\n \"color\": linecolor,\n \"edgecolor\": linecolor,\n \"s\": (linewidth * 2) ** 2,\n \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n **inner_kws,\n }\n ax.scatter(invx(x), invy(y), **kws)\n\n elif inner.startswith(\"stick\"):\n pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(obs), inv_val(obs)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n if self.orient == \"y\":\n segments = segments[:, :, ::-1]\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth / 2,\n **inner_kws,\n }\n lines = mpl.collections.LineCollection(segments, **kws)\n ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n 
[stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n 
aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 566, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " return _normalize_kwargs(plot_kws.pop(f\"{element}props\", {}), artist)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 575, "name": "get_props", "kind": "ref", "category": "function", "info": " \"box\": get_props(\"box\", box_artist),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 576, "name": "get_props", "kind": "ref", "category": "function", "info": " \"median\": get_props(\"median\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", 
"line": 577, "name": "get_props", "kind": "ref", "category": "function", "info": " \"whisker\": get_props(\"whisker\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 578, "name": "get_props", "kind": "ref", "category": "function", "info": " \"flier\": get_props(\"flier\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 579, "name": "get_props", "kind": "ref", "category": "function", "info": " \"cap\": get_props(\"cap\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 588, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 592, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 594, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped = sub_data.groupby(self.orient)[value_var]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 595, "name": "to_numpy", "kind": "ref", "category": "function", "info": " value_data = [x.to_numpy() for _, x in grouped]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 596, "name": "boxplot_stats", "kind": "ref", "category": "function", "info": " stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 597, "name": "to_numpy", "kind": "ref", "category": "function", "info": " positions = grouped.grouper.result_index.to_numpy(dtype=float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 602, "name": "_dodge", "kind": "ref", "category": "function", "info": " self._dodge(sub_vars, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 607, "name": "_invert_scale", "kind": "ref", "category": "function", "info": " self._invert_scale(ax, data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 609, "name": "_hue_map", "kind": "ref", "category": "function", "info": " maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 633, "name": "to_dict", "kind": "ref", "category": "function", "info": " bxpstats=stats.to_dict(\"records\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 636, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " widths=0 if 
self._log_scaled(self.orient) else data[\"width\"],\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 648, "name": "_version_predates", "kind": "ref", "category": "function", "info": " {} if _version_predates(mpl, \"3.6.0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 653, "name": "bxp", "kind": "ref", "category": "function", "info": " artists = ax.bxp(**boxplot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 657, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.orient):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 658, "name": "to_dict", "kind": "ref", "category": "function", "info": " for i, box in enumerate(data.to_dict(\"records\")):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 665, "name": "get_path", "kind": "ref", "category": "function", "info": " box_verts = box_artist.get_path().vertices.T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 667, "name": "get_data", "kind": "ref", "category": "function", "info": " box_verts = box_artist.get_data()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 673, "name": "set_data", "kind": "ref", "category": "function", "info": " box_artist.set_data(box_verts)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 675, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(np.transpose(box_verts))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 678, "name": "get_xydata", "kind": "ref", "category": "function", "info": " verts = artists[\"medians\"][i].get_xydata().T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 680, "name": "set_data", "kind": "ref", "category": "function", "info": " artists[\"medians\"][i].set_data(verts)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 686, "name": "get_xydata", "kind": "ref", "category": "function", "info": " verts = line.get_xydata().T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 688, "name": "set_data", "kind": "ref", "category": "function", "info": " line.set_data(verts)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 690, "name": "add_container", "kind": "ref", "category": "function", "info": " ax.add_container(BoxPlotContainer(artists))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 690, "name": 
"BoxPlotContainer", "kind": "ref", "category": "function", "info": " ax.add_container(BoxPlotContainer(artists))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 697, "name": "_configure_legend", "kind": "ref", "category": "function", "info": " self._configure_legend(ax, ax.fill_between, patch_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 699, "name": "plot_violins", "kind": "def", "category": "function", "info": " def plot_violins(\n self,\n width,\n dodge,\n gap,\n split,\n color,\n fill,\n linecolor,\n linewidth,\n inner,\n density_norm,\n common_norm,\n kde_kws,\n inner_kws,\n plot_kws,\n ):\n\n iter_vars = [self.orient, \"hue\"]\n value_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n\n inner_options = [\"box\", \"quart\", \"stick\", \"point\", None]\n _check_argument(\"inner\", inner_options, inner, prefix=True)\n _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n\n if linecolor is None:\n if \"hue\" in self.variables:\n linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n else:\n linecolor = self._get_gray([color])\n\n if linewidth is None:\n if fill:\n linewidth = 1.25 * mpl.rcParams[\"patch.linewidth\"]\n else:\n linewidth = mpl.rcParams[\"lines.linewidth\"]\n\n if inner is not None and inner.startswith(\"box\"):\n box_width = inner_kws.pop(\"box_width\", linewidth * 4.5)\n whis_width = inner_kws.pop(\"whis_width\", box_width / 3)\n marker = inner_kws.pop(\"marker\", \"_\" if self.orient == \"x\" else \"|\")\n\n kde = KDE(**kde_kws)\n ax = self.ax\n violin_data = []\n\n # Iterate through all the data splits once to compute the KDEs\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=False):\n\n sub_data[\"weight\"] = sub_data.get(\"weights\", 1)\n stat_data = kde._transform(sub_data, value_var, [])\n\n maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n if not fill:\n linecolor = maincolor\n maincolor = \"none\"\n default_kws = dict(\n facecolor=maincolor,\n edgecolor=linecolor,\n linewidth=linewidth,\n )\n\n violin_data.append({\n \"position\": sub_vars[self.orient],\n \"observations\": sub_data[value_var],\n \"density\": stat_data[\"density\"],\n \"support\": stat_data[value_var],\n \"kwargs\": {**default_kws, **plot_kws},\n \"sub_vars\": sub_vars,\n \"ax\": self._get_axes(sub_vars),\n })\n\n # Once we've computed all the KDEs, get statistics for normalization\n def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate through the violins again to apply the normalization and plot\n for 
violin in violin_data:\n\n            index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n            data = pd.DataFrame({\n                self.orient: violin[\"position\"],\n                value_var: violin[\"support\"],\n                \"density\": violin[\"density\"],\n                \"width\": real_width,\n            }, index=index)\n\n            if dodge:\n                self._dodge(violin[\"sub_vars\"], data)\n            if gap:\n                data[\"width\"] *= 1 - gap\n\n            # Normalize the density across the distribution(s) and relative to the width\n            norm_key = vars_to_key(violin[\"sub_vars\"])\n            hw = data[\"width\"] / 2\n            peak_density = violin[\"density\"].max()\n            if np.isnan(peak_density):\n                span = 1\n            elif density_norm == \"area\":\n                span = data[\"density\"] / max_density[norm_key]\n            elif density_norm == \"count\":\n                count = len(violin[\"observations\"])\n                span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n            elif density_norm == \"width\":\n                span = data[\"density\"] / peak_density\n            span = span * hw * (2 if split else 1)\n\n            # Handle split violins (i.e. asymmetric spans)\n            right_side = (\n                0 if \"hue\" not in self.variables\n                else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n            )\n            if split:\n                offsets = (hw, span - hw) if right_side else (span - hw, hw)\n            else:\n                offsets = span, span\n\n            ax = violin[\"ax\"]\n            _, invx = utils._get_transform_functions(ax, \"x\")\n            _, invy = utils._get_transform_functions(ax, \"y\")\n            inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n            inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n            linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n            # Handle singular datasets (one or more observations with no variance)\n            if np.isnan(peak_density):\n                pos = data[self.orient].iloc[0]\n                val = violin[\"observations\"].mean()\n                if self.orient == \"x\":\n                    x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n                else:\n                    x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n                ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n                continue\n\n            # Plot the main violin body\n            plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n            plot_func(\n                inv_val(data[value_var]),\n                inv_pos(data[self.orient] - offsets[0]),\n                inv_pos(data[self.orient] + offsets[1]),\n                **violin[\"kwargs\"]\n            )\n\n            # Adjust the observation data\n            obs = violin[\"observations\"]\n            pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n            if dodge:\n                self._dodge(violin[\"sub_vars\"], pos_dict)\n            if gap:\n                pos_dict[\"width\"] *= (1 - gap)\n\n            # --- Plot the inner components\n            if inner is None:\n                continue\n\n            elif inner.startswith(\"point\"):\n                pos = np.array([pos_dict[self.orient]] * len(obs))\n                if split:\n                    pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n                x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n                kws = {\n                    \"color\": linecolor,\n                    \"edgecolor\": linecolor,\n                    \"s\": (linewidth * 2) ** 2,\n                    \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n                    **inner_kws,\n                }\n                ax.scatter(invx(x), invy(y), **kws)\n\n            elif inner.startswith(\"stick\"):\n                pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n                pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n                pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n                val_pts = np.stack([inv_val(obs), inv_val(obs)])\n                segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n                if self.orient == \"y\":\n                    segments = segments[:, :, ::-1]\n                kws = {\n                    \"color\": linecolor,\n                    \"linewidth\": linewidth / 2,\n                    **inner_kws,\n                }\n                lines = mpl.collections.LineCollection(segments, **kws)\n                ax.add_collection(lines, autolim=False)\n\n            elif inner.startswith(\"quart\"):\n 
stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n [stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n 
)\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, 
**err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 721, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"inner\", inner_options, inner, prefix=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 722, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"density_norm\", [\"area\", \"count\", \"width\"], density_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 726, "name": "_get_gray", "kind": "ref", "category": "function", "info": " linecolor = self._get_gray(list(self._hue_map.lookup_table.values()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 728, "name": "_get_gray", "kind": "ref", "category": "function", "info": " linecolor = self._get_gray([color])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 741, "name": "KDE", "kind": "ref", "category": "function", "info": " kde = KDE(**kde_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 746, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 751, "name": "_transform", "kind": "ref", "category": "function", "info": " stat_data = kde._transform(sub_data, value_var, [])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 753, "name": "_hue_map", "kind": "ref", "category": "function", "info": " maincolor = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 770, "name": "_get_axes", "kind": "ref", "category": "function", "info": " \"ax\": self._get_axes(sub_vars),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 774, "name": "vars_to_key", "kind": "def", "category": "function", "info": " def vars_to_key(sub_vars):\n return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)\n\n norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n if common_norm:\n common_max_density = np.nanmax([v[\"density\"].max() for v in violin_data])\n common_max_count = np.nanmax([len(v[\"observations\"]) for v in violin_data])\n max_density = {key: common_max_density for key in norm_keys}\n max_count = {key: common_max_count for key in norm_keys}\n else:\n max_density = {\n key: np.nanmax([\n v[\"density\"].max() for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n max_count = {\n key: np.nanmax([\n len(v[\"observations\"]) for v in violin_data\n if vars_to_key(v[\"sub_vars\"]) == key\n ]) for key in norm_keys\n }\n\n real_width = width * self._native_width\n\n # Now iterate 
through the violins again to apply the normalization and plot\n        for violin in violin_data:\n\n            index = pd.RangeIndex(0, max(len(violin[\"support\"]), 1))\n            data = pd.DataFrame({\n                self.orient: violin[\"position\"],\n                value_var: violin[\"support\"],\n                \"density\": violin[\"density\"],\n                \"width\": real_width,\n            }, index=index)\n\n            if dodge:\n                self._dodge(violin[\"sub_vars\"], data)\n            if gap:\n                data[\"width\"] *= 1 - gap\n\n            # Normalize the density across the distribution(s) and relative to the width\n            norm_key = vars_to_key(violin[\"sub_vars\"])\n            hw = data[\"width\"] / 2\n            peak_density = violin[\"density\"].max()\n            if np.isnan(peak_density):\n                span = 1\n            elif density_norm == \"area\":\n                span = data[\"density\"] / max_density[norm_key]\n            elif density_norm == \"count\":\n                count = len(violin[\"observations\"])\n                span = data[\"density\"] / peak_density * (count / max_count[norm_key])\n            elif density_norm == \"width\":\n                span = data[\"density\"] / peak_density\n            span = span * hw * (2 if split else 1)\n\n            # Handle split violins (i.e. asymmetric spans)\n            right_side = (\n                0 if \"hue\" not in self.variables\n                else self._hue_map.levels.index(violin[\"sub_vars\"][\"hue\"]) % 2\n            )\n            if split:\n                offsets = (hw, span - hw) if right_side else (span - hw, hw)\n            else:\n                offsets = span, span\n\n            ax = violin[\"ax\"]\n            _, invx = utils._get_transform_functions(ax, \"x\")\n            _, invy = utils._get_transform_functions(ax, \"y\")\n            inv_pos = {\"x\": invx, \"y\": invy}[self.orient]\n            inv_val = {\"x\": invx, \"y\": invy}[value_var]\n\n            linecolor = violin[\"kwargs\"][\"edgecolor\"]\n\n            # Handle singular datasets (one or more observations with no variance)\n            if np.isnan(peak_density):\n                pos = data[self.orient].iloc[0]\n                val = violin[\"observations\"].mean()\n                if self.orient == \"x\":\n                    x, y = [pos - offsets[0], pos + offsets[1]], [val, val]\n                else:\n                    x, y = [val, val], [pos - offsets[0], pos + offsets[1]]\n                ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n                continue\n\n            # Plot the main violin body\n            plot_func = {\"x\": ax.fill_betweenx, \"y\": ax.fill_between}[self.orient]\n            plot_func(\n                inv_val(data[value_var]),\n                inv_pos(data[self.orient] - offsets[0]),\n                inv_pos(data[self.orient] + offsets[1]),\n                **violin[\"kwargs\"]\n            )\n\n            # Adjust the observation data\n            obs = violin[\"observations\"]\n            pos_dict = {self.orient: violin[\"position\"], \"width\": real_width}\n            if dodge:\n                self._dodge(violin[\"sub_vars\"], pos_dict)\n            if gap:\n                pos_dict[\"width\"] *= (1 - gap)\n\n            # --- Plot the inner components\n            if inner is None:\n                continue\n\n            elif inner.startswith(\"point\"):\n                pos = np.array([pos_dict[self.orient]] * len(obs))\n                if split:\n                    pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n                x, y = (pos, obs) if self.orient == \"x\" else (obs, pos)\n                kws = {\n                    \"color\": linecolor,\n                    \"edgecolor\": linecolor,\n                    \"s\": (linewidth * 2) ** 2,\n                    \"zorder\": violin[\"kwargs\"].get(\"zorder\", 2) + 1,\n                    **inner_kws,\n                }\n                ax.scatter(invx(x), invy(y), **kws)\n\n            elif inner.startswith(\"stick\"):\n                pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])\n                pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])\n                pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n                val_pts = np.stack([inv_val(obs), inv_val(obs)])\n                segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)\n                if self.orient == \"y\":\n                    segments = segments[:, :, ::-1]\n                kws = {\n                    \"color\": linecolor,\n                    \"linewidth\": linewidth / 2,\n                    **inner_kws,\n                }\n                lines = mpl.collections.LineCollection(segments, **kws)\n 
ax.add_collection(lines, autolim=False)\n\n elif inner.startswith(\"quart\"):\n stats = np.percentile(obs, [25, 50, 75])\n pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])\n pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])\n pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n val_pts = np.stack([inv_val(stats), inv_val(stats)])\n segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)\n if self.orient == \"y\":\n segments = segments[:, ::-1, :]\n dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]\n for i, segment in enumerate(segments):\n kws = {\n \"color\": linecolor,\n \"linewidth\": linewidth,\n \"dashes\": dashes[i],\n **inner_kws,\n }\n ax.plot(*segment, **kws)\n\n elif inner.startswith(\"box\"):\n stats = mpl.cbook.boxplot_stats(obs)[0]\n pos = np.array(pos_dict[self.orient])\n if split:\n pos += (-1 if right_side else 1) * pos_dict[\"width\"] / 2\n pos = [pos, pos], [pos, pos], [pos]\n val = (\n [stats[\"whislo\"], stats[\"whishi\"]],\n [stats[\"q1\"], stats[\"q3\"]],\n [stats[\"med\"]]\n )\n if self.orient == \"x\":\n (x0, x1, x2), (y0, y1, y2) = pos, val\n else:\n (x0, x1, x2), (y0, y1, y2) = val, pos\n\n if split:\n offset = (1 if right_side else -1) * box_width / 72 / 2\n dx, dy = (offset, 0) if self.orient == \"x\" else (0, -offset)\n trans = ax.transData + mpl.transforms.ScaledTranslation(\n dx, dy, ax.figure.dpi_scale_trans,\n )\n else:\n trans = ax.transData\n line_kws = {\n \"color\": linecolor,\n \"transform\": trans,\n **inner_kws,\n \"linewidth\": whis_width,\n }\n ax.plot(invx(x0), invy(y0), **line_kws)\n line_kws[\"linewidth\"] = box_width\n ax.plot(invx(x1), invy(y1), **line_kws)\n dot_kws = {\n \"marker\": marker,\n \"markersize\": box_width / 1.2,\n \"markeredgewidth\": box_width / 5,\n \"transform\": trans,\n **inner_kws,\n \"markeredgecolor\": \"w\",\n \"markerfacecolor\": \"w\",\n \"color\": linecolor, # simplify tests\n }\n ax.plot(invx(x2), invy(y2), **dot_kws)\n\n self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n\n def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n 
.groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n 
if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 777, "name": "vars_to_key", "kind": "ref", "category": "function", "info": " norm_keys = [vars_to_key(violin[\"sub_vars\"]) for violin in violin_data]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 787, "name": "vars_to_key", "kind": "ref", "category": "function", "info": " if vars_to_key(v[\"sub_vars\"]) == key\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 793, "name": "vars_to_key", "kind": "ref", "category": "function", "info": " if vars_to_key(v[\"sub_vars\"]) == key\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 811, "name": "_dodge", "kind": "ref", "category": "function", "info": " self._dodge(violin[\"sub_vars\"], data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 816, "name": "vars_to_key", "kind": "ref", "category": "function", "info": " norm_key = vars_to_key(violin[\"sub_vars\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 841, "name": "_get_transform_functions", "kind": "ref", "category": "function", "info": " _, invx = utils._get_transform_functions(ax, \"x\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 842, "name": "_get_transform_functions", "kind": "ref", "category": "function", "info": " _, invy = utils._get_transform_functions(ax, \"y\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 856, "name": "invx", "kind": "ref", "category": "function", "info": " ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 856, "name": "invy", "kind": "ref", "category": "function", "info": " ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 861, "name": "plot_func", "kind": "ref", "category": "function", "info": " plot_func(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 862, "name": "inv_val", "kind": "ref", "category": "function", "info": " inv_val(data[value_var]),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 863, "name": "inv_pos", "kind": "ref", "category": "function", "info": " inv_pos(data[self.orient] - offsets[0]),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 864, "name": "inv_pos", "kind": "ref", "category": "function", "info": " inv_pos(data[self.orient] + 
offsets[1]),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 872, "name": "_dodge", "kind": "ref", "category": "function", "info": " self._dodge(violin[\"sub_vars\"], pos_dict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 892, "name": "invx", "kind": "ref", "category": "function", "info": " ax.scatter(invx(x), invy(y), **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 892, "name": "invy", "kind": "ref", "category": "function", "info": " ax.scatter(invx(x), invy(y), **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 897, "name": "inv_pos", "kind": "ref", "category": "function", "info": " pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 897, "name": "inv_pos", "kind": "ref", "category": "function", "info": " pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 898, "name": "inv_val", "kind": "ref", "category": "function", "info": " val_pts = np.stack([inv_val(obs), inv_val(obs)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 898, "name": "inv_val", "kind": "ref", "category": "function", "info": " val_pts = np.stack([inv_val(obs), inv_val(obs)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 907, "name": "LineCollection", "kind": "ref", "category": "function", "info": " lines = mpl.collections.LineCollection(segments, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 908, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines, autolim=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 914, "name": "inv_pos", "kind": "ref", "category": "function", "info": " pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 914, "name": "inv_pos", "kind": "ref", "category": "function", "info": " pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 915, "name": "inv_val", "kind": "ref", "category": "function", "info": " val_pts = np.stack([inv_val(stats), inv_val(stats)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 915, "name": "inv_val", "kind": "ref", "category": "function", "info": " val_pts = np.stack([inv_val(stats), inv_val(stats)])\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 930, "name": "boxplot_stats", "kind": "ref", "category": "function", "info": " stats = mpl.cbook.boxplot_stats(obs)[0]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 948, "name": "ScaledTranslation", "kind": "ref", "category": "function", "info": " trans = ax.transData + mpl.transforms.ScaledTranslation(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 959, "name": "invx", "kind": "ref", "category": "function", "info": " ax.plot(invx(x0), invy(y0), **line_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 959, "name": "invy", "kind": "ref", "category": "function", "info": " ax.plot(invx(x0), invy(y0), **line_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 961, "name": "invx", "kind": "ref", "category": "function", "info": " ax.plot(invx(x1), invy(y1), **line_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 961, "name": "invy", "kind": "ref", "category": "function", "info": " ax.plot(invx(x1), invy(y1), **line_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 972, "name": "invx", "kind": "ref", "category": "function", "info": " ax.plot(invx(x2), invy(y2), **dot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 972, "name": "invy", "kind": "ref", "category": "function", "info": " ax.plot(invx(x2), invy(y2), **dot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 974, "name": "_configure_legend", "kind": "ref", "category": "function", "info": " self._configure_legend(ax, ax.fill_between) # TODO, patch_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 976, "name": "plot_points", "kind": "def", "category": "function", "info": " def plot_points(\n self,\n aggregator,\n markers,\n linestyles,\n dodge,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n plot_kws.setdefault(\"linewidth\", mpl.rcParams[\"lines.linewidth\"] * 1.8)\n plot_kws.setdefault(\"markeredgewidth\", plot_kws[\"linewidth\"] * 0.75)\n plot_kws.setdefault(\"markersize\", plot_kws[\"linewidth\"] * np.sqrt(2 * np.pi))\n\n markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n\n positions = self.var_levels[self.orient]\n if self.var_types[self.orient] == \"categorical\":\n min_cat_val = int(self.comp_data[self.orient].min())\n max_cat_val = int(self.comp_data[self.orient].max())\n positions = [i for i in range(min_cat_val, max_cat_val + 1)]\n else:\n if self._log_scaled(self.orient):\n positions = 
np.log10(positions)\n if self.var_types[self.orient] == \"datetime\":\n positions = mpl.dates.date2num(positions)\n positions = pd.Index(positions, name=self.orient)\n\n n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)\n if dodge is True:\n dodge = .025 * n_hue_levels\n\n ax = self.ax\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reindex(positions)\n .reset_index()\n )\n\n if dodge:\n hue_idx = self._hue_map.levels.index(sub_vars[\"hue\"])\n offset = -dodge * (n_hue_levels - 1) / 2 + dodge * hue_idx\n agg_data[self.orient] += offset * self._native_width\n\n self._invert_scale(ax, agg_data)\n\n sub_kws = plot_kws.copy()\n sub_kws.update(\n marker=markers[sub_vars.get(\"hue\")],\n linestyle=linestyles[sub_vars.get(\"hue\")],\n color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n )\n\n line, = ax.plot(agg_data[\"x\"], agg_data[\"y\"], **sub_kws)\n\n sub_err_kws = err_kws.copy()\n line_props = line.properties()\n for prop in [\"color\", \"linewidth\", \"alpha\", \"zorder\"]:\n sub_err_kws.setdefault(prop, line_props[prop])\n if aggregator.error_method is not None:\n self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n\n semantic_kws = {\"hue\": {\"marker\": markers, \"linestyle\": linestyles}}\n self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n\n def plot_bars(\n self,\n aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in 
data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 991, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " plot_kws = _normalize_kwargs(plot_kws, mpl.lines.Line2D)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 996, "name": "_map_prop_with_hue", "kind": "ref", "category": "function", "info": " markers = self._map_prop_with_hue(\"marker\", markers, \"o\", plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 997, "name": "_map_prop_with_hue", "kind": "ref", "category": "function", "info": " linestyles = self._map_prop_with_hue(\"linestyle\", linestyles, \"-\", plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1005, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.orient):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1008, "name": "date2num", "kind": "ref", "category": "function", "info": " positions = mpl.dates.date2num(positions)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1017, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1021, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1025, "name": "groupby", "kind": "ref", "category": "function", "info": " .groupby(self.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1026, "name": "apply", "kind": "ref", "category": "function", "info": " .apply(aggregator, agg_var)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1027, "name": "reindex", "kind": "ref", "category": "function", "info": " .reindex(positions)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1028, "name": "reset_index", "kind": "ref", "category": "function", "info": " .reset_index()\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1036, "name": "_invert_scale", "kind": "ref", "category": "function", "info": " self._invert_scale(ax, agg_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1042, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color=self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1052, "name": "plot_errorbars", "kind": "ref", "category": "function", "info": " self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1055, "name": "_configure_legend", "kind": "ref", "category": "function", "info": " self._configure_legend(ax, ax.plot, sub_kws, semantic_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1057, "name": "plot_bars", "kind": "def", "category": "function", "info": " def plot_bars(\n self,\n aggregator,\n dodge,\n gap,\n width,\n fill,\n color,\n capsize,\n err_kws,\n plot_kws,\n ):\n\n agg_var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n iter_vars = [\"hue\"]\n\n ax = self.ax\n\n if self._hue_map.levels is None:\n dodge = False\n\n if dodge and capsize is not None:\n capsize = capsize / len(self._hue_map.levels)\n\n if not fill:\n plot_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n err_kws.setdefault(\"linewidth\", 1.5 * mpl.rcParams[\"lines.linewidth\"])\n\n for sub_vars, sub_data in self.iter_data(iter_vars,\n from_comp_data=True,\n allow_empty=True):\n\n ax = self._get_axes(sub_vars)\n\n agg_data = sub_data if sub_data.empty else (\n sub_data\n .groupby(self.orient)\n .apply(aggregator, agg_var)\n .reset_index()\n )\n\n agg_data[\"width\"] = width * self._native_width\n if dodge:\n self._dodge(sub_vars, agg_data)\n if gap:\n agg_data[\"width\"] *= 1 - gap\n\n agg_data[\"edge\"] = agg_data[self.orient] - agg_data[\"width\"] / 2\n self._invert_scale(ax, agg_data)\n\n if self.orient == \"x\":\n bar_func = ax.bar\n kws = dict(\n x=agg_data[\"edge\"], height=agg_data[\"y\"], width=agg_data[\"width\"]\n )\n else:\n bar_func = ax.barh\n kws = dict(\n y=agg_data[\"edge\"], width=agg_data[\"x\"], height=agg_data[\"width\"]\n )\n\n main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n\n # Set both color and facecolor for property cycle logic\n kws[\"align\"] = \"edge\"\n if fill:\n kws.update(color=main_color, facecolor=main_color)\n else:\n kws.update(color=main_color, edgecolor=main_color, facecolor=\"none\")\n\n bar_func(**{**kws, **plot_kws})\n\n if aggregator.error_method is not None:\n self.plot_errorbars(\n ax, agg_data, capsize,\n {\"color\": \".26\" if fill else main_color, **err_kws}\n )\n\n self._configure_legend(ax, ax.fill_between)\n\n def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n for row in data.to_dict(\"records\"):\n\n row = dict(row)\n pos = np.array([row[self.orient], row[self.orient]])\n val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n cw = capsize * self._native_width / 2\n if self._log_scaled(self.orient):\n 
log_pos = np.log10(pos)\n cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n else:\n cap = pos[0] - cw, pos[1] + cw\n\n if capsize:\n pos = np.concatenate([\n [*cap, np.nan], pos, [np.nan, *cap]\n ])\n val = np.concatenate([\n [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n ])\n\n if self.orient == \"x\":\n args = pos, val\n else:\n args = val, pos\n ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1086, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(iter_vars,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1090, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1094, "name": "groupby", "kind": "ref", "category": "function", "info": " .groupby(self.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1095, "name": "apply", "kind": "ref", "category": "function", "info": " .apply(aggregator, agg_var)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1096, "name": "reset_index", "kind": "ref", "category": "function", "info": " .reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1101, "name": "_dodge", "kind": "ref", "category": "function", "info": " self._dodge(sub_vars, agg_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1106, "name": "_invert_scale", "kind": "ref", "category": "function", "info": " self._invert_scale(ax, agg_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1119, "name": "_hue_map", "kind": "ref", "category": "function", "info": " main_color = self._hue_map(sub_vars[\"hue\"]) if \"hue\" in sub_vars else color\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1128, "name": "bar_func", "kind": "ref", "category": "function", "info": " bar_func(**{**kws, **plot_kws})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1131, "name": "plot_errorbars", "kind": "ref", "category": "function", "info": " self.plot_errorbars(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1136, "name": "_configure_legend", "kind": "ref", "category": "function", "info": " self._configure_legend(ax, ax.fill_between)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1138, "name": "plot_errorbars", "kind": "def", "category": "function", "info": " def plot_errorbars(self, ax, data, capsize, err_kws):\n\n var = {\"x\": \"y\", \"y\": \"x\"}[self.orient]\n 
for row in data.to_dict(\"records\"):\n\n            row = dict(row)\n            pos = np.array([row[self.orient], row[self.orient]])\n            val = np.array([row[f\"{var}min\"], row[f\"{var}max\"]])\n\n            cw = capsize * self._native_width / 2\n            if self._log_scaled(self.orient):\n                log_pos = np.log10(pos)\n                cap = 10 ** (log_pos[0] - cw), 10 ** (log_pos[1] + cw)\n            else:\n                cap = pos[0] - cw, pos[1] + cw\n\n            if capsize:\n                pos = np.concatenate([\n                    [*cap, np.nan], pos, [np.nan, *cap]\n                ])\n                val = np.concatenate([\n                    [val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],\n                ])\n\n            if self.orient == \"x\":\n                args = pos, val\n            else:\n                args = val, pos\n            ax.plot(*args, **err_kws)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1141, "name": "to_dict", "kind": "ref", "category": "function", "info": "        for row in data.to_dict(\"records\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1148, "name": "_log_scaled", "kind": "ref", "category": "function", "info": "            if self._log_scaled(self.orient):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1169, "name": "_CategoricalAggPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1174, "name": "_CategoricalFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1178, "name": "_CategoricalAggFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1183, "name": "_CategoricalPlotter", "kind": "def", "category": "class", "info": "establish_variables\t_group_longform\testablish_colors\thue_offsets\tnested_width\tannotate_axes\tadd_legend_data"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1189, "name": "establish_variables", "kind": "def", "category": "function", "info": "    def establish_variables(self, x=None, y=None, hue=None, data=None,\n                            orient=None, order=None, hue_order=None,\n                            units=None):\n        \"\"\"Convert input specification into a common representation.\"\"\"\n        # Option 1:\n        # We are plotting a wide-form dataset\n        # -----------------------------------\n        if x is None and y is None:\n\n            # Do a sanity check on the inputs\n            if hue is not None:\n                error = \"Cannot use `hue` without `x` and `y`\"\n                raise ValueError(error)\n\n            # No hue grouping with wide inputs\n            plot_hues = None\n            hue_title = None\n            hue_names = None\n\n            # No statistical units with wide inputs\n            plot_units = None\n\n            # We also won't get axes labels here\n            value_label = None\n            group_label = None\n\n            # Option 1a:\n            # The input data is a Pandas DataFrame\n            # ------------------------------------\n\n            if isinstance(data, pd.DataFrame):\n\n                # Order the data correctly\n                if order is None:\n                    order = []\n                    # Reduce to just numeric columns\n                    for col in data:\n                        if variable_type(data[col]) == \"numeric\":\n                            order.append(col)\n                plot_data = data[order]\n                group_names = order\n                group_label = data.columns.name\n\n                # Convert to a 
list of arrays, the common representation\n iter_data = plot_data.items()\n plot_data = [np.asarray(s, float) for k, s in iter_data]\n\n # Option 1b:\n # The input data is an array or list\n # ----------------------------------\n\n else:\n\n # We can't reorder the data\n if order is not None:\n error = \"Input data must be a pandas object to reorder\"\n raise ValueError(error)\n\n # The input data is an array\n if hasattr(data, \"shape\"):\n if len(data.shape) == 1:\n if np.isscalar(data[0]):\n plot_data = [data]\n else:\n plot_data = list(data)\n elif len(data.shape) == 2:\n nr, nc = data.shape\n if nr == 1 or nc == 1:\n plot_data = [data.ravel()]\n else:\n plot_data = [data[:, i] for i in range(nc)]\n else:\n error = (\"Input `data` can have no \"\n \"more than 2 dimensions\")\n raise ValueError(error)\n\n # Check if `data` is None to let us bail out here (for testing)\n elif data is None:\n plot_data = [[]]\n\n # The input data is a flat list\n elif np.isscalar(data[0]):\n plot_data = [data]\n\n # The input data is a nested list\n # This will catch some things that might fail later\n # but exhaustive checks are hard\n else:\n plot_data = data\n\n # Convert to a list of arrays, the common representation\n plot_data = [np.asarray(d, float) for d in plot_data]\n\n # The group names will just be numeric indices\n group_names = list(range(len(plot_data)))\n\n # Figure out the plotting orientation\n orient = \"y\" if str(orient)[0] in \"hy\" else \"x\"\n\n # Option 2:\n # We are plotting a long-form dataset\n # -----------------------------------\n\n else:\n\n # See if we need to get variables from `data`\n if data is not None:\n x = data.get(x, x)\n y = data.get(y, y)\n hue = data.get(hue, hue)\n units = data.get(units, units)\n\n # Validate the inputs\n for var in [x, y, hue, units]:\n if isinstance(var, str):\n err = f\"Could not interpret input '{var}'\"\n raise ValueError(err)\n\n # Figure out the plotting orientation\n orient = infer_orient(x, y, orient, require_numeric=self.require_numeric)\n\n # Option 2a:\n # We are plotting a single set of data\n # ------------------------------------\n if x is None or y is None:\n\n # Determine where the data are\n vals = y if x is None else x\n\n # Put them into the common representation\n plot_data = [np.asarray(vals)]\n\n # Get a label for the value axis\n if hasattr(vals, \"name\"):\n value_label = vals.name\n else:\n value_label = None\n\n # This plot will not have group labels or hue nesting\n groups = None\n group_label = None\n group_names = []\n plot_hues = None\n hue_names = None\n hue_title = None\n plot_units = None\n\n # Option 2b:\n # We are grouping the data values by another variable\n # ---------------------------------------------------\n else:\n\n # Determine which role each variable will play\n if orient == \"x\":\n vals, groups = y, x\n else:\n vals, groups = x, y\n\n # Get the categorical axis label\n group_label = None\n if hasattr(groups, \"name\"):\n group_label = groups.name\n\n # Get the order on the categorical axis\n group_names = categorical_order(groups, order)\n\n # Group the numeric data\n plot_data, value_label = self._group_longform(vals, groups,\n group_names)\n\n # Now handle the hue levels for nested ordering\n if hue is None:\n plot_hues = None\n hue_title = None\n hue_names = None\n else:\n\n # Get the order of the hue levels\n hue_names = categorical_order(hue, hue_order)\n\n # Group the hue data\n plot_hues, hue_title = self._group_longform(hue, groups,\n group_names)\n\n # Now handle the units for nested 
observations\n            if units is None:\n                plot_units = None\n            else:\n                plot_units, _ = self._group_longform(units, groups,\n                                                     group_names)\n\n        # Assign object attributes\n        # ------------------------\n        self.orient = orient\n        self.plot_data = plot_data\n        self.group_label = group_label\n        self.value_label = value_label\n        self.group_names = group_names\n        self.plot_hues = plot_hues\n        self.hue_title = hue_title\n        self.hue_names = hue_names\n        self.plot_units = plot_units\n\n    def _group_longform(self, vals, grouper, order):\n        \"\"\"Group a long-form variable by another with correct order.\"\"\"\n        # Ensure that the groupby will work\n        if not isinstance(vals, pd.Series):\n            if isinstance(grouper, pd.Series):\n                index = grouper.index\n            else:\n                index = None\n            vals = pd.Series(vals, index=index)\n\n        # Group the val data\n        grouped_vals = vals.groupby(grouper)\n        out_data = []\n        for g in order:\n            try:\n                g_vals = grouped_vals.get_group(g)\n            except KeyError:\n                g_vals = np.array([])\n            out_data.append(g_vals)\n\n        # Get the vals axis label\n        label = vals.name\n\n        return out_data, label\n\n    def establish_colors(self, color, palette, saturation):\n        \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n        if self.hue_names is None:\n            n_colors = len(self.plot_data)\n        else:\n            n_colors = len(self.hue_names)\n\n        # Determine the main colors\n        if color is None and palette is None:\n            # Determine whether the current palette will have enough values\n            # If not, we'll default to the husl palette so each is distinct\n            current_palette = utils.get_color_cycle()\n            if n_colors <= len(current_palette):\n                colors = color_palette(n_colors=n_colors)\n            else:\n                colors = husl_palette(n_colors, l=.7)  # noqa\n\n        elif palette is None:\n            # When passing a specific color, the interpretation depends\n            # on whether there is a hue variable or not.\n            # If so, we will make a blend palette so that the different\n            # levels have some amount of variation.\n            if self.hue_names is None:\n                colors = [color] * n_colors\n            else:\n                if self.default_palette == \"light\":\n                    colors = light_palette(color, n_colors)\n                elif self.default_palette == \"dark\":\n                    colors = dark_palette(color, n_colors)\n                else:\n                    raise RuntimeError(\"No default palette specified\")\n        else:\n\n            # Let `palette` be a dict mapping level to color\n            if isinstance(palette, dict):\n                if self.hue_names is None:\n                    levels = self.group_names\n                else:\n                    levels = self.hue_names\n                palette = [palette[l] for l in levels]\n\n            colors = color_palette(palette, n_colors)\n\n        # Desaturate a bit because these are patches\n        if saturation < 1:\n            colors = color_palette(colors, desat=saturation)\n\n        # Convert the colors to a common representation\n        rgb_colors = color_palette(colors)\n\n        # Determine the gray color to use for the lines framing the plot\n        light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n        lum = min(light_vals) * .6\n        gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n        # Assign object attributes\n        self.colors = rgb_colors\n        self.gray = gray\n\n    @property\n    def hue_offsets(self):\n        \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n        n_levels = len(self.hue_names)\n        if self.dodge:\n            each_width = self.width / n_levels\n            offsets = np.linspace(0, self.width - each_width, n_levels)\n            offsets -= offsets.mean()\n        else:\n            offsets = np.zeros(n_levels)\n\n        return offsets\n\n    @property\n    def nested_width(self):\n        \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n        if self.dodge:\n            width = self.width / len(self.hue_names) * .98\n        else:\n            width = self.width\n 
return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"x\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"x\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1226, "name": "variable_type", "kind": "ref", "category": "function", "info": " if variable_type(data[col]) == \"numeric\":\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1308, "name": "infer_orient", "kind": "ref", "category": "function", "info": " orient = infer_orient(x, y, orient, require_numeric=self.require_numeric)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1353, "name": "categorical_order", "kind": "ref", "category": "function", "info": " group_names = categorical_order(groups, order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1356, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_data, value_label = self._group_longform(vals, groups,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1367, "name": "categorical_order", "kind": "ref", "category": "function", "info": " hue_names = categorical_order(hue, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1370, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_hues, hue_title = self._group_longform(hue, groups,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1377, "name": "_group_longform", "kind": "ref", "category": "function", "info": " plot_units, _ = self._group_longform(units, groups,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1392, "name": "_group_longform", "kind": "def", "category": "function", "info": " def _group_longform(self, vals, grouper, order):\n \"\"\"Group a long-form variable by another with correct order.\"\"\"\n # Ensure that the groupby will work\n 
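annotate_axes, recorded above, encodes the categorical-axis conventions: integer tick positions, group names as tick labels, grid lines disabled on the group axis, and half a unit of padding on each side. A sketch of the same Axes setup on hypothetical labels:

import matplotlib.pyplot as plt
import numpy as np

group_names = ["setosa", "versicolor", "virginica"]   # illustrative labels
n = len(group_names)

fig, ax = plt.subplots()
ax.set_xticks(np.arange(n))                # groups sit at 0 .. n-1
ax.set_xticklabels(group_names)
ax.xaxis.grid(False)                       # no grid lines on the group axis
ax.set_xlim(-.5, n - .5, auto=None)        # half-unit padding on both sides
plt.close(fig)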
if not isinstance(vals, pd.Series):\n if isinstance(grouper, pd.Series):\n index = grouper.index\n else:\n index = None\n vals = pd.Series(vals, index=index)\n\n # Group the val data\n grouped_vals = vals.groupby(grouper)\n out_data = []\n for g in order:\n try:\n g_vals = grouped_vals.get_group(g)\n except KeyError:\n g_vals = np.array([])\n out_data.append(g_vals)\n\n # Get the vals axis label\n label = vals.name\n\n return out_data, label\n\n def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"x\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"x\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n 
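The framing gray computed at the end of establish_colors comes from the darkest palette entry: take the minimum HLS lightness across the colors and scale it by 0.6, so the frame is always darker than the patches. A self-contained sketch using illustrative colors:

from colorsys import rgb_to_hls
import matplotlib as mpl

rgb_colors = [(0.8, 0.3, 0.3), (0.3, 0.8, 0.3), (0.3, 0.3, 0.8)]
light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]   # HLS lightness values
lum = min(light_vals) * .6
gray = mpl.colors.rgb2hex((lum, lum, lum))
print(gray)                                # '#545454' for these inputs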
ax.set_yticklabels(group_names)\n\n if self.orient == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1403, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped_vals = vals.groupby(grouper)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1407, "name": "get_group", "kind": "ref", "category": "function", "info": " g_vals = grouped_vals.get_group(g)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1417, "name": "establish_colors", "kind": "def", "category": "function", "info": " def establish_colors(self, color, palette, saturation):\n \"\"\"Get a list of colors for the main component of the plots.\"\"\"\n if self.hue_names is None:\n n_colors = len(self.plot_data)\n else:\n n_colors = len(self.hue_names)\n\n # Determine the main colors\n if color is None and palette is None:\n # Determine whether the current palette will have enough values\n # If not, we'll default to the husl palette so each is distinct\n current_palette = utils.get_color_cycle()\n if n_colors <= len(current_palette):\n colors = color_palette(n_colors=n_colors)\n else:\n colors = husl_palette(n_colors, l=.7) # noqa\n\n elif palette is None:\n # When passing a specific color, the interpretation depends\n # on whether there is a hue variable or not.\n # If so, we will make a blend palette so that the different\n # levels have some amount of variation.\n if self.hue_names is None:\n colors = [color] * n_colors\n else:\n if self.default_palette == \"light\":\n colors = light_palette(color, n_colors)\n elif self.default_palette == \"dark\":\n colors = dark_palette(color, n_colors)\n else:\n raise RuntimeError(\"No default palette specified\")\n else:\n\n # Let `palette` be a dict mapping level to color\n if isinstance(palette, dict):\n if self.hue_names is None:\n levels = self.group_names\n else:\n levels = self.hue_names\n palette = [palette[l] for l in levels]\n\n colors = color_palette(palette, n_colors)\n\n # Desaturate a bit because these are patches\n if saturation < 1:\n colors = color_palette(colors, desat=saturation)\n\n # Convert the colors to a common representations\n rgb_colors = color_palette(colors)\n\n # Determine the gray color to use for the lines framing the plot\n light_vals = [rgb_to_hls(*c)[1] for c in rgb_colors]\n lum = min(light_vals) * .6\n gray = mpl.colors.rgb2hex((lum, lum, lum))\n\n # Assign object attributes\n self.colors = rgb_colors\n self.gray = gray\n\n @property\n def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = 
np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"x\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"x\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1428, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " current_palette = utils.get_color_cycle()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1430, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(n_colors=n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1432, "name": "husl_palette", "kind": "ref", "category": "function", "info": " colors = husl_palette(n_colors, l=.7) # noqa\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1443, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1445, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1458, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(palette, n_colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1462, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(colors, desat=saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1465, "name": "color_palette", "kind": "ref", "category": "function", "info": " 
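hue_offsets and nested_width, both visible above, implement the dodge geometry: split the group width evenly among the hue levels and center the slice positions at zero. In plain numpy, with example parameters:

import numpy as np

width, n_levels, dodge = 0.8, 3, True
if dodge:
    each = width / n_levels
    offsets = np.linspace(0, width - each, n_levels)
    offsets -= offsets.mean()              # center the offsets on zero
    nested_width = each * .98              # leave a small gap between boxes
else:
    offsets = np.zeros(n_levels)
    nested_width = width

print(offsets)        # [-0.26666667  0.          0.26666667]
print(nested_width)   # 0.26133...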
rgb_colors = color_palette(colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1470, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " gray = mpl.colors.rgb2hex((lum, lum, lum))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1477, "name": "hue_offsets", "kind": "def", "category": "function", "info": " def hue_offsets(self):\n \"\"\"A list of center positions for plots when hue nesting is used.\"\"\"\n n_levels = len(self.hue_names)\n if self.dodge:\n each_width = self.width / n_levels\n offsets = np.linspace(0, self.width - each_width, n_levels)\n offsets -= offsets.mean()\n else:\n offsets = np.zeros(n_levels)\n\n return offsets\n\n @property\n def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"x\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"x\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1490, "name": "nested_width", "kind": "def", "category": "function", "info": " def nested_width(self):\n \"\"\"A float with the width of plot elements when hue nesting is used.\"\"\"\n if self.dodge:\n width = self.width / len(self.hue_names) * .98\n else:\n width = self.width\n return width\n\n def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"x\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"x\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, 
auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1498, "name": "annotate_axes", "kind": "def", "category": "function", "info": " def annotate_axes(self, ax):\n \"\"\"Add descriptive labels to an Axes object.\"\"\"\n if self.orient == \"x\":\n xlabel, ylabel = self.group_label, self.value_label\n else:\n xlabel, ylabel = self.value_label, self.group_label\n\n if xlabel is not None:\n ax.set_xlabel(xlabel)\n if ylabel is not None:\n ax.set_ylabel(ylabel)\n\n group_names = self.group_names\n if not group_names:\n group_names = [\"\" for _ in range(len(self.plot_data))]\n\n if self.orient == \"x\":\n ax.set_xticks(np.arange(len(self.plot_data)))\n ax.set_xticklabels(group_names)\n else:\n ax.set_yticks(np.arange(len(self.plot_data)))\n ax.set_yticklabels(group_names)\n\n if self.orient == \"x\":\n ax.xaxis.grid(False)\n ax.set_xlim(-.5, len(self.plot_data) - .5, auto=None)\n else:\n ax.yaxis.grid(False)\n ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n\n if self.hue_names is not None:\n ax.legend(loc=\"best\", title=self.hue_title)\n\n def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1506, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(xlabel)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1508, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(ylabel)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1515, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(len(self.plot_data)))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1516, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels(group_names)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1518, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks(np.arange(len(self.plot_data)))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1519, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ax.set_yticklabels(group_names)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1523, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(-.5, 
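add_legend_data relies on a matplotlib detail: a zero-size patch still registers with the legend machinery, so nothing visible is drawn but the entry appears. A sketch under assumed colors and labels:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for color, label in [("C0", "level a"), ("C1", "level b")]:   # example levels
    rect = plt.Rectangle((0, 0), 0, 0,     # degenerate, so nothing renders
                         edgecolor=".2", facecolor=color, label=label)
    ax.add_patch(rect)
ax.legend(loc="best", title="hue")         # picks up the dummy patches
plt.close(fig)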
len(self.plot_data) - .5, auto=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1526, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(-.5, len(self.plot_data) - .5, auto=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1531, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax, color, label):\n \"\"\"Add a dummy patch object so we can get legend data.\"\"\"\n rect = plt.Rectangle([0, 0], 0, 0,\n linewidth=self.linewidth / 2,\n edgecolor=self.gray,\n facecolor=color,\n label=label)\n ax.add_patch(rect)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1538, "name": "add_patch", "kind": "ref", "category": "function", "info": " ax.add_patch(rect)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1541, "name": "_LVPlotter", "kind": "def", "category": "class", "info": "__init__\t_lv_box_ends\t_lv_outliers\t_width_functions\t_lvplot\tdraw_letter_value_plot\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1581, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(x, y, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1582, "name": "establish_colors", "kind": "ref", "category": "function", "info": " self.establish_colors(color, palette, saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1584, "name": "_lv_box_ends", "kind": "def", "category": "function", "info": " def _lv_box_ends(self, vals):\n \"\"\"Get the number of data points and calculate `depth` of\n letter-value plot.\"\"\"\n vals = np.asarray(vals)\n # Remove infinite values while handling a 'object' dtype\n # that can come from pd.Float64Dtype() input\n with pd.option_context('mode.use_inf_as_na', True):\n vals = vals[~pd.isnull(vals)]\n n = len(vals)\n p = self.outlier_prop\n\n # Select the depth, i.e. 
number of boxes to draw, based on the method\n if self.k_depth == 'full':\n # extend boxes to 100% of the data\n k = int(np.log2(n)) + 1\n elif self.k_depth == 'tukey':\n # This results with 5-8 points in each tail\n k = int(np.log2(n)) - 3\n elif self.k_depth == 'proportion':\n k = int(np.log2(n)) - int(np.log2(n * p)) + 1\n elif self.k_depth == 'trustworthy':\n point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n k = int(np.log2(n / point_conf)) + 1\n else:\n k = int(self.k_depth) # allow having k as input\n # If the number happens to be less than 1, set k to 1\n if k < 1:\n k = 1\n\n # Calculate the upper end for each of the k boxes\n upper = [100 * (1 - 0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Calculate the lower end for each of the k boxes\n lower = [100 * (0.5 ** (i + 1)) for i in range(k, 0, -1)]\n # Stitch the box ends together\n percentile_ends = [(i, j) for i, j in zip(lower, upper)]\n box_ends = [np.percentile(vals, q) for q in percentile_ends]\n return box_ends, k\n\n def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. / 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"x\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n 
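The depth selection and quantile pairs in _lv_box_ends above are easy to reproduce standalone. The sketch below applies just the 'tukey' rule to simulated data; the other rules ('full', 'proportion', 'trustworthy', or a literal integer) differ only in how k is chosen:

import numpy as np

rng = np.random.default_rng(0)
vals = rng.normal(size=500)
n = len(vals)

k = max(int(np.log2(n)) - 3, 1)            # 'tukey' depth rule
upper = [100 * (1 - 0.5 ** (i + 1)) for i in range(k, 0, -1)]
lower = [100 * (0.5 ** (i + 1)) for i in range(k, 0, -1)]
box_ends = [np.percentile(vals, q) for q in zip(lower, upper)]

print(k)                                    # 5 for n = 500
print(np.round(box_ends[0], 2))             # outermost (widest) quantile box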
# Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n 
ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1605, "name": "_normal_quantile_func", "kind": "ref", "category": "function", "info": " point_conf = 2 * _normal_quantile_func(1 - self.trust_alpha / 2) ** 2\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1622, "name": "_lv_outliers", "kind": "def", "category": "function", "info": " def _lv_outliers(self, vals, k):\n \"\"\"Find the outliers based on the letter value depth.\"\"\"\n box_edge = 0.5 ** (k + 1)\n perc_ends = (100 * box_edge, 100 * (1 - box_edge))\n edges = np.percentile(vals, perc_ends)\n lower_out = vals[np.where(vals < edges[0])[0]]\n upper_out = vals[np.where(vals > edges[1])[0]]\n return np.concatenate((lower_out, upper_out))\n\n def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. / 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"x\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n 
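_lv_outliers, defined above, flags everything beyond the outermost letter-value box edges, i.e. the quantiles 0.5**(k+1) and 1 - 0.5**(k+1). A worked miniature:

import numpy as np

vals = np.asarray([-9.0, -1.0, 0.0, 0.5, 1.0, 2.0, 11.0])
k = 2                                       # depth chosen elsewhere
box_edge = 0.5 ** (k + 1)
edges = np.percentile(vals, (100 * box_edge, 100 * (1 - box_edge)))
outliers = np.concatenate((vals[vals < edges[0]], vals[vals > edges[1]]))
print(outliers)                             # [-9. 11.]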
def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n 
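_lvplot merges user keyword dicts with library defaults via the copy-then-setdefault idiom repeated above. Extracted as a tiny helper (the name merge_defaults is invented for illustration):

def merge_defaults(user_kws, **defaults):
    kws = {} if user_kws is None else user_kws.copy()
    for key, val in defaults.items():
        kws.setdefault(key, val)            # user-supplied keys always win
    return kws

box_kws = merge_defaults({"linewidth": 2}, edgecolor=".3", linewidth=1)
print(box_kws)    # {'linewidth': 2, 'edgecolor': '.3'} -- user value kept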
self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1631, "name": "_width_functions", "kind": "def", "category": "function", "info": " def _width_functions(self, width_func):\n # Dictionary of functions for computing the width of the boxes\n width_functions = {'linear': lambda h, i, k: (i + 1.) / k,\n 'exponential': lambda h, i, k: 2**(-k + i - 1),\n 'area': lambda h, i, k: (1 - 2**(-k + i - 2)) / h}\n return width_functions[width_func]\n\n def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. / 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"x\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = 
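The three entries of _width_functions are plain closures over the box index i (of k boxes) and the box height h; evaluating them side by side makes the scaling behavior concrete. These are the same rules as above, before normalization by the maximum:

width_functions = {
    "linear": lambda h, i, k: (i + 1.) / k,               # even steps
    "exponential": lambda h, i, k: 2 ** (-k + i - 1),     # halve per level out
    "area": lambda h, i, k: (1 - 2 ** (-k + i - 2)) / h,  # equal box areas
}

k = 4
for name, fn in width_functions.items():
    print(name, [round(fn(1.0, i, k), 3) for i in range(k)])
# linear      [0.25, 0.5, 0.75, 1.0]
# exponential [0.031, 0.062, 0.125, 0.25]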
np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1638, "name": "_lvplot", "kind": "def", "category": "function", "info": " def _lvplot(self, box_data, positions,\n color=[255. / 256., 185. 
/ 256., 0.],\n widths=1, ax=None, box_kws=None,\n flier_kws=None,\n line_kws=None):\n\n # -- Default keyword dicts - based on\n # distributions.plot_univariate_histogram\n box_kws = {} if box_kws is None else box_kws.copy()\n flier_kws = {} if flier_kws is None else flier_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n\n # Set the default kwargs for the boxes\n box_default_kws = dict(edgecolor=self.gray,\n linewidth=self.linewidth)\n for k, v in box_default_kws.items():\n box_kws.setdefault(k, v)\n\n # Set the default kwargs for the lines denoting medians\n line_default_kws = dict(\n color=\".15\", alpha=0.45, solid_capstyle=\"butt\", linewidth=self.linewidth\n )\n for k, v in line_default_kws.items():\n line_kws.setdefault(k, v)\n\n # Set the default kwargs for the outliers scatterplot\n flier_default_kws = dict(marker='d', color=self.gray)\n for k, v in flier_default_kws.items():\n flier_kws.setdefault(k, v)\n\n vert = self.orient == \"x\"\n x = positions[0]\n box_data = np.asarray(box_data)\n\n # If we only have one data point, plot a line\n if len(box_data) == 1:\n line_kws.update({\n 'color': box_kws['edgecolor'],\n 'linestyle': box_kws.get('linestyle', '-'),\n 'linewidth': max(box_kws[\"linewidth\"], line_kws[\"linewidth\"])\n })\n ys = [box_data[0], box_data[0]]\n xs = [x - widths / 2, x + widths / 2]\n if vert:\n xx, yy = xs, ys\n else:\n xx, yy = ys, xs\n ax.plot(xx, yy, **line_kws)\n else:\n # Get the number of data points and calculate \"depth\" of\n # letter-value plot\n box_ends, k = self._lv_box_ends(box_data)\n\n # Anonymous functions for calculating the width and height\n # of the letter value boxes\n width = self._width_functions(self.scale)\n\n # Function to find height of boxes\n def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b 
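One branch of _lvplot above handles a group with a single observation by drawing a segment instead of a box: there is nothing to summarize, so a line of the box width marks the value. A sketch with assumed position and value:

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
x, value, widths = 0, 3.2, 0.8              # illustrative group position/value
xs = [x - widths / 2, x + widths / 2]
ys = [value, value]
ax.plot(xs, ys, color=".2", linewidth=2)    # vertical orientation: y is data
plt.close(fig)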
in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1689, "name": "_lv_box_ends", "kind": "ref", "category": "function", "info": " box_ends, k = self._lv_box_ends(box_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1693, "name": "_width_functions", "kind": "ref", "category": "function", "info": " width = self._width_functions(self.scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1696, "name": "height", "kind": "def", "category": "function", "info": " def height(b):\n return b[1] - b[0]\n\n # Functions to construct the letter value boxes\n def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = 
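The shading step near the end of _lvplot builds a colormap from the base color toward a light tint and lets PatchCollection.set_array drive the gradation from 1 (innermost box, full color) down to 0. A self-contained approximation, with the geometry simplified to three nested rectangles:

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from matplotlib.collections import PatchCollection

hex_color = mpl.colors.rgb2hex((255 / 256, 185 / 256, 0))   # default gold
cmap = mpl.colors.LinearSegmentedColormap.from_list("tmp", [hex_color, (1, 1, 1)])
# Stop at cmap(.85) so the outermost box keeps a visible tint of the hue
cmap = mpl.colors.LinearSegmentedColormap.from_list("tmp", [hex_color, cmap(.85)])

boxes = [Rectangle((0 - w / 2, -w), w, 2 * w) for w in (1.0, 0.6, 0.3)]
collection = PatchCollection(boxes, cmap=cmap, edgecolor=".3")
collection.set_array(np.linspace(1, 0, len(boxes)))          # 1 -> base color

fig, ax = plt.subplots()
ax.add_collection(collection)
ax.autoscale_view()
plt.close(fig)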
self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1700, "name": "vert_perc_box", "kind": "def", "category": "function", "info": " def vert_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((x - widths * w / 2, b[0]),\n widths * w,\n height(b), fill=True)\n return rect\n\n def horz_perc_box(x, b, i, k, w):\n rect = 
Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == 
\"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1703, "name": "height", "kind": "ref", "category": "function", "info": " height(b), fill=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1706, "name": "horz_perc_box", "kind": "def", "category": "function", "info": " def horz_perc_box(x, b, i, k, w):\n rect = Patches.Rectangle((b[0], x - widths * w / 2),\n height(b), widths * w,\n fill=True)\n return rect\n\n # Scale the width of the boxes so the biggest starts at 1\n w_area = np.array([width(height(b), i, k)\n for i, b in enumerate(box_ends)])\n w_area = w_area / np.max(w_area)\n\n # Calculate the medians\n y = np.median(box_data)\n\n # Calculate the outliers and plot (only if showfliers == True)\n outliers = []\n if self.showfliers:\n outliers = self._lv_outliers(box_data, k)\n hex_color = mpl.colors.rgb2hex(color)\n\n if vert:\n box_func = vert_perc_box\n xs_median = [x - widths / 2, x + widths / 2]\n ys_median = [y, y]\n xs_outliers = np.full(len(outliers), x)\n ys_outliers = outliers\n\n else:\n box_func = horz_perc_box\n xs_median = [y, y]\n ys_median = [x - widths / 2, x + widths / 2]\n xs_outliers = outliers\n ys_outliers = np.full(len(outliers), x)\n\n # Plot the medians\n ax.plot(\n xs_median,\n ys_median,\n **line_kws\n )\n\n # Plot outliers (if any)\n if len(outliers) > 0:\n ax.scatter(xs_outliers, ys_outliers,\n **flier_kws\n )\n\n # Construct a color map from the input color\n rgb = [hex_color, (1, 1, 1)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n # Make sure that the last boxes contain hue and are not pure white\n rgb = [hex_color, cmap(.85)]\n cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n\n # Update box_kws with `cmap` if not defined in dict until now\n box_kws.setdefault('cmap', cmap)\n\n boxes = [box_func(x, b[0], i, k, b[1])\n for i, b in enumerate(zip(box_ends, w_area))]\n\n collection = PatchCollection(boxes, **box_kws)\n\n # Set the color gradation, first box will have color=hex_color\n collection.set_array(np.array(np.linspace(1, 0, len(boxes))))\n\n # Plot the boxes\n ax.add_collection(collection)\n\n def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = 
self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1708, "name": "height", "kind": "ref", "category": "function", "info": " height(b), widths * w,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1713, "name": "width", "kind": "ref", "category": "function", "info": " w_area = np.array([width(height(b), i, k)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1713, "name": "height", "kind": "ref", "category": "function", "info": " w_area = np.array([width(height(b), i, k)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1723, "name": "_lv_outliers", "kind": "ref", "category": "function", "info": " outliers = self._lv_outliers(box_data, k)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1724, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " hex_color = mpl.colors.rgb2hex(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1755, "name": "from_list", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1757, "name": "cmap", "kind": "ref", "category": "function", "info": " rgb = [hex_color, cmap(.85)]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1758, "name": "from_list", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap.from_list('new_map', rgb)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1763, "name": "box_func", "kind": "ref", "category": "function", "info": " boxes = [box_func(x, b[0], i, k, b[1])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1772, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(collection)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1774, "name": "draw_letter_value_plot", "kind": "def", "category": "function", "info": " def draw_letter_value_plot(self, ax, box_kws=None, flier_kws=None,\n line_kws=None):\n \"\"\"Use matplotlib to draw a letter value plot 
on an Axes.\"\"\"\n\n for i, group_data in enumerate(self.plot_data):\n\n if self.plot_hues is None:\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n # Draw a single box or a set of boxes\n # with a single level of grouping\n box_data = remove_na(group_data)\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[i]\n\n self._lvplot(box_data,\n positions=[i],\n color=color,\n widths=self.width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n else:\n # Draw nested groups of boxes\n offsets = self.hue_offsets\n for j, hue_level in enumerate(self.hue_names):\n\n # Add a legend for this hue level\n if not i:\n self.add_legend_data(ax, self.colors[j], hue_level)\n\n # Handle case where there is data at this level\n if group_data.size == 0:\n continue\n\n hue_mask = self.plot_hues[i] == hue_level\n box_data = remove_na(group_data[hue_mask])\n\n # Handle case where there is no non-null data\n if box_data.size == 0:\n continue\n\n color = self.colors[j]\n center = i + offsets[j]\n self._lvplot(box_data,\n positions=[center],\n color=color,\n widths=self.nested_width,\n ax=ax,\n box_kws=box_kws,\n flier_kws=flier_kws,\n line_kws=line_kws)\n\n # Autoscale the values axis to make sure all patches are visible\n ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n\n def plot(self, ax, box_kws, flier_kws, line_kws):\n \"\"\"Make the plot.\"\"\"\n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"y\":\n ax.invert_yaxis()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1788, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = remove_na(group_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1796, "name": "_lvplot", "kind": "ref", "category": "function", "info": " self._lvplot(box_data,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1812, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax, self.colors[j], hue_level)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1819, "name": "remove_na", "kind": "ref", "category": "function", "info": " box_data = remove_na(group_data[hue_mask])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1827, "name": "_lvplot", "kind": "ref", "category": "function", "info": " self._lvplot(box_data,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1837, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=self.orient == \"y\", scaley=self.orient == \"x\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1841, "name": "draw_letter_value_plot", "kind": "ref", "category": "function", "info": " self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1842, "name": "annotate_axes", "kind": "ref", "category": "function", "info": " self.annotate_axes(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 1844, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2083, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2085, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2100, "name": "_dodge_needed", "kind": "ref", "category": "function", "info": " dodge = p._dodge_needed()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2103, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2105, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2108, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2109, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2112, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2113, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2119, "name": "plot_boxes", "kind": "ref", "category": "function", "info": " p.plot_boxes(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2132, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 2133, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2209, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2211, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2226, "name": "_dodge_needed", "kind": "ref", "category": "function", "info": " dodge = p._dodge_needed()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2229, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2231, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2234, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2235, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2238, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2239, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2245, "name": "_scale_backcompat", "kind": "ref", "category": "function", "info": " density_norm, common_norm = p._scale_backcompat(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2261, "name": "plot_violins", "kind": "ref", "category": "function", "info": " p.plot_violins(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2278, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2279, "name": 
"_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2394, "name": "boxenplot", "kind": "def", "category": "function", "info": "def boxenplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75,\n width=.8, dodge=True, k_depth='tukey', linewidth=None,\n scale='exponential', outlier_prop=0.007, trust_alpha=0.05,\n showfliers=True,\n ax=None, box_kws=None, flier_kws=None, line_kws=None,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2402, "name": "_LVPlotter", "kind": "ref", "category": "function", "info": " plotter = _LVPlotter(x, y, hue, data, order, hue_order,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2490, "name": "stripplot", "kind": "def", "category": "function", "info": "def stripplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n jitter=True, dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0,\n hue_norm=None, native_scale=False, formatter=None, legend=\"auto\",\n ax=None, **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2498, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2500, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2514, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2516, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2519, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2520, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2522, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2524, "name": 
"map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2536, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2547, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2548, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2617, "name": "swarmplot", "kind": "def", "category": "function", "info": "def swarmplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n dodge=False, orient=None, color=None, palette=None,\n size=5, edgecolor=\"gray\", linewidth=0, hue_norm=None,\n native_scale=False, formatter=None, legend=\"auto\", warn_thresh=.05,\n ax=None, **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2625, "name": "_CategoricalPlotterNew", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotterNew(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2627, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalPlotterNew.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2641, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2643, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2649, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2650, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2652, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2654, "name": "map_hue", "kind": "ref", "category": "function", "info": " 
p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2668, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2676, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2677, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2743, "name": "barplot", "kind": "def", "category": "function", "info": "def barplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, units=None, seed=None,\n orient=None, color=None, palette=None, saturation=.75, fill=True, hue_norm=None,\n width=.8, dodge=\"auto\", gap=0, native_scale=False, formatter=None, legend=\"auto\",\n capsize=0, err_kws=None, ci=deprecated, errcolor=deprecated, errwidth=deprecated,\n ax=None, **kwargs,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2752, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2759, "name": "_CategoricalAggPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalAggPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2761, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalAggPlotter.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2776, "name": "_dodge_needed", "kind": "ref", "category": "function", "info": " dodge = p._dodge_needed()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2779, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2781, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2784, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2785, "name": "_hue_backcompat", "kind": "ref", 
"category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2788, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2789, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.bar, hue, color, kwargs, saturation=saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2791, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " aggregator = EstimateAggregator(estimator, errorbar, n_boot=n_boot, seed=seed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2792, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " err_kws = {} if err_kws is None else _normalize_kwargs(err_kws, mpl.lines.Line2D)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2795, "name": "_err_kws_backcompat", "kind": "ref", "category": "function", "info": " err_kws, capsize = p._err_kws_backcompat(err_kws, errcolor, errwidth, capsize)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2797, "name": "plot_bars", "kind": "ref", "category": "function", "info": " p.plot_bars(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2809, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2810, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2883, "name": "pointplot", "kind": "def", "category": "function", "info": "def pointplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, units=None, seed=None,\n color=None, palette=None, hue_norm=None, markers=default, linestyles=default,\n dodge=False, native_scale=False, orient=None, capsize=0,\n formatter=None, legend=\"auto\", err_kws=None,\n ci=deprecated, errwidth=deprecated, join=deprecated, scale=deprecated,\n ax=None,\n **kwargs,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2894, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = utils._deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2896, "name": "_CategoricalAggPlotter", "kind": "ref", "category": "function", "info": " p = 
_CategoricalAggPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2898, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalAggPlotter.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2912, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2914, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2917, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2918, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2920, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2921, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2923, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " aggregator = EstimateAggregator(estimator, errorbar, n_boot=n_boot, seed=seed)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2924, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " err_kws = {} if err_kws is None else _normalize_kwargs(err_kws, mpl.lines.Line2D)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2927, "name": "_point_kwargs_backcompat", "kind": "ref", "category": "function", "info": " p._point_kwargs_backcompat(scale, join, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2928, "name": "_err_kws_backcompat", "kind": "ref", "category": "function", "info": " err_kws, capsize = p._err_kws_backcompat(err_kws, None, errwidth, capsize)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2930, "name": "plot_points", "kind": "ref", "category": "function", "info": " p.plot_points(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 2941, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 2942, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3028, "name": "countplot", "kind": "def", "category": "function", "info": "def countplot(\n data=None, *, x=None, y=None, hue=None, order=None, hue_order=None,\n orient=None, color=None, palette=None, saturation=.75, fill=True, hue_norm=None,\n stat=\"count\", width=.8, dodge=\"auto\", gap=0, native_scale=False, formatter=None,\n legend=\"auto\", ax=None, **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3044, "name": "_CategoricalAggPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalAggPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3046, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_CategoricalAggPlotter.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3061, "name": "_dodge_needed", "kind": "ref", "category": "function", "info": " dodge = p._dodge_needed()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3064, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3066, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3069, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3070, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3073, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm, saturation=saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3074, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(ax.bar, hue, color, kwargs, saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": 
"seaborn/categorical.py", "line": 3080, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"stat\", [\"count\", \"percent\", \"probability\", \"proportion\"], stat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3086, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " aggregator = EstimateAggregator(\"sum\", errorbar=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3088, "name": "plot_bars", "kind": "ref", "category": "function", "info": " p.plot_bars(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3100, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " p._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3101, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3159, "name": "catplot", "kind": "def", "category": "function", "info": "def catplot(\n data=None, *, x=None, y=None, hue=None, row=None, col=None,\n col_wrap=None, estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000,\n units=None, seed=None, order=None, hue_order=None, row_order=None,\n col_order=None, height=5, aspect=1, kind=\"strip\", native_scale=False,\n formatter=None, orient=None, color=None, palette=None, hue_norm=None,\n legend=\"auto\", legend_out=True, sharex=True, sharey=True,\n margin_titles=False, facet_kws=None, ci=\"deprecated\",\n **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3205, "name": "Plotter", "kind": "ref", "category": "function", "info": " p = Plotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3207, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=Plotter.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3222, "name": "rename", "kind": "ref", "category": "function", "info": " data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3223, "name": "duplicated", "kind": "ref", "category": "function", "info": " data = data.loc[:, ~data.columns.duplicated()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3231, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3246, "name": "scale_categorical", "kind": "ref", "category": "function", "info": " p.scale_categorical(p.orient, order=order, formatter=formatter)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3248, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(g)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3254, "name": "_palette_without_hue_backcompat", "kind": "ref", "category": "function", "info": " hue_order = p._palette_without_hue_backcompat(palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3255, "name": "_hue_backcompat", "kind": "ref", "category": "function", "info": " palette, hue_order = p._hue_backcompat(color, palette, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3261, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3270, "name": "desaturate", "kind": "ref", "category": "function", "info": " color = desaturate(color, saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3276, "name": "_dodge_needed", "kind": "ref", "category": "function", "info": " dodge = p._dodge_needed()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3289, "name": "plot_strips", "kind": "ref", "category": "function", "info": " p.plot_strips(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3310, "name": "plot_swarms", "kind": "ref", "category": "function", "info": " p.plot_swarms(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3328, "name": "plot_boxes", "kind": "ref", "category": "function", "info": " p.plot_boxes(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3353, "name": "_scale_backcompat", "kind": "ref", "category": "function", "info": " density_norm, common_norm = p._scale_backcompat(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3377, "name": "plot_violins", "kind": "ref", "category": "function", "info": " p.plot_violins(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3396, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " aggregator = EstimateAggregator(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3407, "name": "_point_kwargs_backcompat", "kind": "ref", "category": "function", "info": " p._point_kwargs_backcompat(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3412, "name": "_err_kws_backcompat", "kind": "ref", "category": "function", "info": " err_kws, capsize = 
p._err_kws_backcompat(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3413, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " _normalize_kwargs(kwargs.pop(\"err_kws\", {}), mpl.lines.Line2D),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3419, "name": "plot_points", "kind": "ref", "category": "function", "info": " p.plot_points(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3432, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " aggregator = EstimateAggregator(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3435, "name": "_err_kws_backcompat", "kind": "ref", "category": "function", "info": " err_kws, capsize = p._err_kws_backcompat(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3436, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " _normalize_kwargs(kwargs.pop(\"err_kws\", {}), mpl.lines.Line2D),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3444, "name": "plot_bars", "kind": "ref", "category": "function", "info": " p.plot_bars(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3458, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " aggregator = EstimateAggregator(\"sum\", errorbar=None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3464, "name": "_check_argument", "kind": "ref", "category": "function", "info": " stat = _check_argument(\"stat\", stat_options, kwargs.pop(\"stat\", \"count\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3473, "name": "plot_bars", "kind": "ref", "category": "function", "info": " p.plot_bars(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3486, "name": "_adjust_cat_axis", "kind": "ref", "category": "function", "info": " p._adjust_cat_axis(ax, axis=p.orient)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3488, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.variables.get(\"x\"), p.variables.get(\"y\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3489, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3493, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " g._update_legend_data(ax)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3497, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=p.variables.get(\"hue\"), label_order=hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3524, "name": "_CategoricalPlotter", "kind": "ref", "category": "function", "info": " p = _CategoricalPlotter()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3526, "name": "establish_variables", "kind": "ref", "category": "function", "info": " p.establish_variables(x_, y_, hue, data, orient, order, hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3550, "name": "establish_colors", "kind": "ref", "category": "function", "info": " p.establish_colors(color, palette, 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3573, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(**facet_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3578, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(plot_func, x=x, y=y, hue=hue, **plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3581, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.value_label, p.group_label)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3583, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(p.group_label, p.value_label)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3587, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(title=hue, label_order=hue_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3679, "name": "Beeswarm", "kind": "def", "category": "class", "info": "__init__\t__call__\tbeeswarm\tcould_overlap\tposition_candidates\tfirst_non_overlapping_candidate\tadd_gutters"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3705, "name": "transform", "kind": "ref", "category": "function", "info": " orig_xy = ax.transData.transform(orig_xy_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3712, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3715, "name": "item", "kind": "ref", "category": "function", "info": " edge = points.get_linewidth().item()\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3725, "name": "beeswarm", "kind": "ref", "category": "function", "info": " new_xyr[sorter] = self.beeswarm(orig_xyr)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3732, "name": "inverted", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3732, "name": "transform", "kind": "ref", "category": "function", "info": " new_x_data, new_y_data = ax.transData.inverted().transform(new_xy).T\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3738, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_y_data, center, log_scale=log_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3740, "name": "add_gutters", "kind": "ref", "category": "function", "info": " self.add_gutters(new_x_data, center, log_scale=log_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3748, "name": "beeswarm", "kind": "def", "category": "function", "info": " def beeswarm(self, orig_xyr):\n \"\"\"Adjust x position of points to avoid overlaps.\"\"\"\n # In this method, `x` is always the categorical axis\n # Center of the swarm, in point coordinates\n midline = orig_xyr[0, 0]\n\n # Start the swarm with the first point\n swarm = np.atleast_2d(orig_xyr[0])\n\n # Loop over the remaining points\n for xyr_i in orig_xyr[1:]:\n\n # Find the points in the swarm that could possibly\n # overlap with the point we are currently placing\n neighbors = self.could_overlap(xyr_i, swarm)\n\n # Find positions that would be valid individually\n # with respect to each of the swarm neighbors\n candidates = self.position_candidates(xyr_i, neighbors)\n\n # Sort candidates by their centrality\n offsets = np.abs(candidates[:, 0] - midline)\n candidates = candidates[np.argsort(offsets)]\n\n # Find the first candidate that does not overlap any neighbors\n new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n\n # Place it into the swarm\n swarm = np.vstack([swarm, new_xyr_i])\n\n return swarm\n\n def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n 
candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3762, "name": "could_overlap", "kind": "ref", "category": "function", "info": " neighbors = self.could_overlap(xyr_i, swarm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3766, "name": "position_candidates", "kind": "ref", "category": "function", "info": " candidates = self.position_candidates(xyr_i, neighbors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3773, "name": "first_non_overlapping_candidate", "kind": "ref", "category": "function", "info": " new_xyr_i = self.first_non_overlapping_candidate(candidates, neighbors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3780, "name": "could_overlap", "kind": "def", "category": "function", "info": " def could_overlap(self, xyr_i, swarm):\n \"\"\"Return a list of all swarm points that could overlap with target.\"\"\"\n # Because we work backwards through the swarm and can short-circuit,\n # the for-loop is faster than vectorization\n _, y_i, r_i = xyr_i\n neighbors = []\n for xyr_j in reversed(swarm):\n _, y_j, r_j = xyr_j\n if (y_i - y_j) < (r_i + r_j):\n neighbors.append(xyr_j)\n else:\n break\n return np.array(neighbors)[::-1]\n\n def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = 
[xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3794, "name": "position_candidates", "kind": "def", "category": "function", "info": " def position_candidates(self, xyr_i, neighbors):\n \"\"\"Return a list of coordinates that might be valid by adjusting x.\"\"\"\n candidates = [xyr_i]\n x_i, y_i, r_i = xyr_i\n left_first = True\n for x_j, y_j, r_j in neighbors:\n dy = y_i - y_j\n dx = np.sqrt(max((r_i + r_j) ** 2 - dy ** 2, 0)) * 1.05\n cl, cr = (x_j - dx, y_i, r_i), (x_j + dx, y_i, r_i)\n if left_first:\n new_candidates = [cl, cr]\n else:\n new_candidates = [cr, cl]\n candidates.extend(new_candidates)\n left_first = not left_first\n return np.array(candidates)\n\n def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means 
that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3811, "name": "first_non_overlapping_candidate", "kind": "def", "category": "function", "info": " def first_non_overlapping_candidate(self, candidates, neighbors):\n \"\"\"Find the first candidate that does not overlap with the swarm.\"\"\"\n\n # If we have no neighbors, all candidates are good.\n if len(neighbors) == 0:\n return candidates[0]\n\n neighbors_x = neighbors[:, 0]\n neighbors_y = neighbors[:, 1]\n neighbors_r = neighbors[:, 2]\n\n for xyr_i in candidates:\n\n x_i, y_i, r_i = xyr_i\n\n dx = neighbors_x - x_i\n dy = neighbors_y - y_i\n sq_distances = np.square(dx) + np.square(dy)\n\n sep_needed = np.square(neighbors_r + r_i)\n\n # Good candidate does not overlap any of neighbors which means that\n # squared distance between candidate and any of the neighbors has\n # to be at least square of the summed radii\n good_candidate = np.all(sq_distances >= sep_needed)\n\n if good_candidate:\n return xyr_i\n\n raise RuntimeError(\n \"No non-overlapping candidates found. 
This should not happen.\"\n )\n\n def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3844, "name": "add_gutters", "kind": "def", "category": "function", "info": " def add_gutters(self, points, center, log_scale=False):\n \"\"\"Stop points from extending beyond their territory.\"\"\"\n half_width = self.width / 2\n if log_scale:\n low_gutter = 10 ** (np.log10(center) - half_width)\n else:\n low_gutter = center - half_width\n off_low = points < low_gutter\n if off_low.any():\n points[off_low] = low_gutter\n if log_scale:\n high_gutter = 10 ** (np.log10(center) + half_width)\n else:\n high_gutter = center + half_width\n off_high = points > high_gutter\n if off_high.any():\n points[off_high] = high_gutter\n\n gutter_prop = (off_high + off_low).sum() / len(points)\n if gutter_prop > self.warn_thresh:\n msg = (\n \"{:.1%} of the points cannot be placed; you may want \"\n \"to decrease the size of the markers or use stripplot.\"\n ).format(gutter_prop)\n warnings.warn(msg, UserWarning)\n\n return points\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3876, "name": "BoxPlotContainer", "kind": "def", "category": "class", "info": "__init__\t__repr__\t__getitem__\t__iter__\tget_label\tset_label\tget_children\tremove"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/categorical.py", "rel_fname": "seaborn/categorical.py", "line": 3902, "name": "BoxPlotArtists", "kind": "ref", "category": "function", "info": " return BoxPlotArtists(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/cm.py", "rel_fname": "seaborn/cm.py", "line": 1582, "name": "register_colormap", "kind": "ref", "category": "function", "info": " register_colormap(_name, _cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/cm.py", "rel_fname": "seaborn/cm.py", "line": 1583, "name": "register_colormap", "kind": "ref", "category": "function", "info": " register_colormap(_name + \"_r\", _cmap_r)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 84, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 86, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " 
facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 87, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " dist=DocstringComponents(_dist_params),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 88, "name": "from_function_params", "kind": "ref", "category": "function", "info": " kde=DocstringComponents.from_function_params(KDE.__init__),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 89, "name": "from_function_params", "kind": "ref", "category": "function", "info": " hist=DocstringComponents.from_function_params(Histogram.__init__),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 90, "name": "from_function_params", "kind": "ref", "category": "function", "info": " ecdf=DocstringComponents.from_function_params(ECDF.__init__),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 99, "name": "_DistributionPlotter", "kind": "def", "category": "class", "info": "__init__\tunivariate\tdata_variable\thas_xy_data\t_add_legend\t_artist_kws\t_quantile_to_level\t_cmap_from_color\t_default_discrete\t_resolve_multiple\t_compute_univariate_density\tplot_univariate_histogram\tplot_bivariate_histogram\tplot_univariate_density\tplot_bivariate_density\tplot_univariate_ecdf\tplot_rug\t_plot_single_rug"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 115, "name": "univariate", "kind": "def", "category": "function", "info": " def univariate(self):\n \"\"\"Return True if only x or y are used.\"\"\"\n # TODO this could go down to core, but putting it here now.\n # We'd want to be conceptually clear that univariate only applies\n # to x/y and not to other semantics, which can exist.\n # We haven't settled on a good conceptual name for x/y.\n return bool({\"x\", \"y\"} - set(self.variables))\n\n @property\n def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put then in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to workaround an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if 
\"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = 
estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = 
estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = 
np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in 
self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid 
= any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 124, "name": "data_variable", "kind": "def", "category": "function", "info": " def data_variable(self):\n \"\"\"Return the variable with data for univariate plots.\"\"\"\n # TODO This could also be in core, but it should have a better name.\n if not self.univariate:\n raise AttributeError(\"This is not a univariate plot\")\n return {\"x\", \"y\"}.intersection(self.variables).pop()\n\n @property\n def has_xy_data(self):\n \"\"\"Return True at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put then in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to workaround an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = 
estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = 
estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = 
np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in 
self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid 
= any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 132, "name": "has_xy_data", "kind": "def", "category": "function", "info": " def has_xy_data(self):\n \"\"\"Return True if at least one of x or y is defined.\"\"\"\n # TODO see above points about where this should go\n return bool({\"x\", \"y\"} & set(self.variables))\n\n def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = 
estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = 
estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = 
np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in 
self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid 
= any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 137, "name": "_add_legend", "kind": "def", "category": "function", "info": " def _add_legend(\n self,\n ax_obj, artist, fill, element, multiple, alpha, artist_kws, legend_kws,\n ):\n \"\"\"Add artists that reflect semantic mappings and put them in a legend.\"\"\"\n # TODO note that this doesn't handle numeric mappings like the relational plots\n handles = []\n labels = []\n for level in self._hue_map.levels:\n color = self._hue_map(level)\n\n kws = self._artist_kws(\n artist_kws, fill, element, multiple, color, alpha\n )\n\n # color gets added to the kws to work around an issue with barplot's color\n # cycle integration but it causes problems in this context where we are\n # setting artist properties directly, so pop it off here\n if \"facecolor\" in kws:\n kws.pop(\"color\", None)\n\n handles.append(artist(**kws))\n labels.append(level)\n\n if isinstance(ax_obj, mpl.axes.Axes):\n ax_obj.legend(handles, labels, title=self.variables[\"hue\"], **legend_kws)\n else: # i.e. a FacetGrid. 
TODO make this better\n legend_data = dict(zip(labels, handles))\n ax_obj.add_legend(\n legend_data,\n title=self.variables[\"hue\"],\n label_order=self.var_levels[\"hue\"],\n **legend_kws\n )\n\n def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = 
estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = 
estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = 
np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in 
self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid 
= any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 146, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(level)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 148, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " kws = self._artist_kws(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 158, "name": "artist", "kind": "ref", "category": "function", "info": " handles.append(artist(**kws))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 165, "name": "add_legend", "kind": "ref", "category": "function", "info": " ax_obj.add_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 172, "name": "_artist_kws", "kind": "def", "category": "function", "info": " def _artist_kws(self, kws, fill, element, multiple, color, alpha):\n \"\"\"Handle differences between artists in filled/unfilled plots.\"\"\"\n kws = kws.copy()\n if fill:\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n kws.setdefault(\"facecolor\", to_rgba(color, alpha))\n\n if element == \"bars\":\n # Make bar() interface with property cycle correctly\n # https://github.com/matplotlib/matplotlib/issues/19385\n kws[\"color\"] = \"none\"\n\n if multiple in [\"stack\", \"fill\"] or element == \"bars\":\n kws.setdefault(\"edgecolor\", mpl.rcParams[\"patch.edgecolor\"])\n else:\n kws.setdefault(\"edgecolor\", to_rgba(color, 1))\n elif element == \"bars\":\n kws[\"facecolor\"] = \"none\"\n kws[\"edgecolor\"] = to_rgba(color, alpha)\n else:\n kws[\"color\"] = to_rgba(color, alpha)\n return kws\n\n def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, 
b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethinking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", 
\"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = 
[]\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n 
h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n 
if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
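        # [editor's note] A minimal sketch of the log-domain round trip this
        # comment refers to, assuming base-10 log scaling (hypothetical values):
        #
        #     import numpy as np
        #     vals = np.array([1., 10., 100.])
        #     comp = np.log10(vals)        # computation happens in log space
        #     back = np.power(10, comp)    # plotting needs linear units again
        #     assert np.allclose(back, vals)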
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 176, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 195, "name": "_quantile_to_level", "kind": "def", "category": "function", "info": " def _quantile_to_level(self, data, quantile):\n \"\"\"Return data levels corresponding to quantile cuts of mass.\"\"\"\n isoprop = np.asarray(quantile)\n values = np.ravel(data)\n sorted_values = np.sort(values)[::-1]\n normalized_values = np.cumsum(sorted_values) / values.sum()\n idx = np.searchsorted(normalized_values, 1 - isoprop)\n levels = np.take(sorted_values, idx, mode=\"clip\")\n return levels\n\n def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = 
estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = 
estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = 
np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in 
self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid 
= any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
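        # [editor's note] The rug segments drawn below span data coordinates on
        # one axis and axes-fraction coordinates on the other. A minimal
        # standalone sketch of that blended-transform technique (hypothetical
        # example data):
        #
        #     import numpy as np
        #     import matplotlib.pyplot as plt
        #     import matplotlib.transforms as tx
        #     from matplotlib.collections import LineCollection
        #
        #     fig, ax = plt.subplots()
        #     xs = np.random.default_rng(0).normal(size=20)
        #     trans = tx.blended_transform_factory(ax.transData, ax.transAxes)
        #     segs = [((x, 0), (x, .05)) for x in xs]  # x: data units, y: axes fraction
        #     ax.add_collection(LineCollection(segs, transform=trans))
        #     ax.autoscale_view(scalex=True, scaley=False)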
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 205, "name": "_cmap_from_color", "kind": "def", "category": "function", "info": " def _cmap_from_color(self, color):\n \"\"\"Return a sequential colormap given a color seed.\"\"\"\n # Like so much else here, this is broadly useful, but keeping it\n # in this class to signify that I haven't thought overly hard about it...\n r, g, b, _ = to_rgba(color)\n h, s, _ = husl.rgb_to_husl(r, g, b)\n xx = np.linspace(-1, 1, int(1.15 * 256))[:256]\n ramp = np.zeros((256, 3))\n ramp[:, 0] = h\n ramp[:, 1] = s * np.cos(xx)\n ramp[:, 2] = np.linspace(35, 80, 256)\n colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n return mpl.colors.ListedColormap(colors[::-1])\n\n def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n # Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = 
estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = 
estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = 
np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in 
self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid 
= any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 210, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, _ = husl.rgb_to_husl(r, g, b)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 216, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " colors = np.clip([husl.husl_to_rgb(*hsl) for hsl in ramp], 0, 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 217, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(colors[::-1])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 219, "name": "_default_discrete", "kind": "def", "category": "function", "info": " def _default_discrete(self):\n \"\"\"Find default values for discrete hist estimation based on variable type.\"\"\"\n if self.univariate:\n discrete = self.var_types[self.data_variable] == \"categorical\"\n else:\n discrete_x = self.var_types[\"x\"] == \"categorical\"\n discrete_y = self.var_types[\"y\"] == \"categorical\"\n discrete = discrete_x, discrete_y\n return discrete\n\n def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) 
semantics have been assigned\n        if \"hue\" not in self.variables:\n            return curves, baselines\n\n    def _plot_single_rug(self, sub_data, var, height, ax, kws):\n        \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n        vector = sub_data[var]\n        n = len(vector)\n\n        # Return data to linear domain\n        # This needs an automatic solution; see GH2409\n        
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 229, "name": "_resolve_multiple", "kind": "def", "category": "function", "info": " def _resolve_multiple(self, curves, multiple):\n \"\"\"Modify the density data structure to handle multiple densities.\"\"\"\n\n # Default baselines have all densities starting at 0\n baselines = {k: np.zeros_like(v) for k, v in curves.items()}\n\n # TODO we should have some central clearinghouse for checking if any\n # \"grouping\" (terminnology?) semantics have been assigned\n if \"hue\" not in self.variables:\n return curves, baselines\n\n if multiple in (\"stack\", \"fill\"):\n\n # Setting stack or fill means that the curves share a\n # support grid / set of bin edges, so we can make a dataframe\n # Reverse the column order to plot from top to bottom\n curves = pd.DataFrame(curves).iloc[:, ::-1]\n\n # Find column groups that are nested within col/row variables\n column_groups = {}\n for i, keyd in enumerate(map(dict, curves.columns)):\n facet_key = keyd.get(\"col\", None), keyd.get(\"row\", None)\n column_groups.setdefault(facet_key, [])\n column_groups[facet_key].append(i)\n\n baselines = curves.copy()\n\n for col_idxs in column_groups.values():\n cols = curves.columns[col_idxs]\n\n norm_constant = curves[cols].sum(axis=\"columns\")\n\n # Take the cumulative sum to stack\n curves[cols] = curves[cols].cumsum(axis=\"columns\")\n\n # Normalize by row sum to fill\n if multiple == \"fill\":\n curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n\n # Define where each segment starts\n baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n\n if multiple == \"dodge\":\n\n # Account for the unique semantic (non-faceting) levels\n # This will require rethiniking if we add other semantics!\n hue_levels = self.var_levels[\"hue\"]\n n = len(hue_levels)\n for key in curves:\n level = dict(key)[\"hue\"]\n hist = curves[key].reset_index(name=\"heights\")\n level_idx = hue_levels.index(level)\n if self._log_scaled(self.data_variable):\n log_min = np.log10(hist[\"edges\"])\n log_max = np.log10(hist[\"edges\"] + hist[\"widths\"])\n log_width = (log_max - log_min) / n\n new_min = np.power(10, log_min + level_idx * log_width)\n new_max = np.power(10, log_min + (level_idx + 1) * log_width)\n hist[\"widths\"] = new_max - new_min\n hist[\"edges\"] = new_min\n else:\n hist[\"widths\"] /= n\n hist[\"edges\"] += level_idx * hist[\"widths\"]\n\n curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n\n return curves, baselines\n\n # -------------------------------------------------------------------------------- #\n 
# Computation\n # -------------------------------------------------------------------------------- #\n\n def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n\n # Initialize the estimator object\n estimator = KDE(**estimate_kws)\n\n if set(self.variables) - {\"x\", \"y\"}:\n if common_grid:\n all_observations = self.comp_data.dropna()\n estimator.define_support(all_observations[data_variable])\n else:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n if common_norm and \"weights\" in all_data:\n whole_weight = all_data[\"weights\"].sum()\n else:\n whole_weight = len(all_data)\n\n densities = {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this subset and remove nulls\n observations = sub_data[data_variable]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n part_weight = weights.sum()\n else:\n weights = None\n part_weight = len(sub_data)\n\n # Estimate the density of observations at this level\n variance = np.nan_to_num(observations.var())\n singular = len(observations) < 2 or math.isclose(variance, 0)\n try:\n if not singular:\n # Convoluted approach needed because numerical failures\n # can manifest in a few different ways.\n density, support = estimator(observations, weights=weights)\n except np.linalg.LinAlgError:\n singular = True\n\n if singular:\n msg = (\n \"Dataset has 0 variance; skipping density estimate. \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=4)\n continue\n\n if log_scale:\n support = np.power(10, support)\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= part_weight / whole_weight\n\n # Store the density for this level\n key = tuple(sub_vars.items())\n densities[key] = pd.Series(density, index=support)\n\n return densities\n\n # -------------------------------------------------------------------------------- #\n # Plotting\n # -------------------------------------------------------------------------------- #\n\n def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` cannot be 'auto' when using weights. 
\"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = 
[]\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw the hull of the histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fill_betweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = pd.concat([\n # Use .items for generality over dict or df\n 
h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't follow the color cycle here, as multiple plots are unlikely)\n 
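# \"C0\" is matplotlib shorthand for the first color of the active property cycle\n 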
if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally I'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
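# Log-scaled axes store base-10 log positions, so raising 10 to the value inverts the transform\n 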
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 266, "name": "div", "kind": "ref", "category": "function", "info": " curves[cols] = curves[cols].div(norm_constant, axis=\"index\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 269, "name": "shift", "kind": "ref", "category": "function", "info": " baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 269, "name": "fillna", "kind": "ref", "category": "function", "info": " baselines[cols] = curves[cols].shift(1, axis=1).fillna(0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 279, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = curves[key].reset_index(name=\"heights\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 281, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 293, "name": "set_index", "kind": "ref", "category": "function", "info": " curves[key] = hist.set_index([\"edges\", \"widths\"])[\"heights\"]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 301, "name": "_compute_univariate_density", "kind": "def", "category": "function", "info": " def _compute_univariate_density(\n self,\n data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular=True,\n ):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 312, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 316, "name": "dropna", "kind": "ref", "category": "function", "info": " all_observations = self.comp_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 317, "name": "define_support", "kind": "ref", "category": "function", "info": " estimator.define_support(all_observations[data_variable])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 321, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 329, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 349, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(observations, weights=weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 379, "name": "plot_univariate_histogram", "kind": "def", "category": "function", "info": " def plot_univariate_histogram(\n self,\n multiple,\n element,\n fill,\n common_norm,\n common_bins,\n shrink,\n kde,\n kde_kws,\n color,\n legend,\n line_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # -- Default keyword dicts\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n line_kws = {} if line_kws is None else line_kws.copy()\n estimate_kws = {} if estimate_kws is None else estimate_kws.copy()\n\n # -- Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n\n auto_bins_with_weights = (\n \"weights\" in self.variables\n and estimate_kws[\"bins\"] == \"auto\"\n and estimate_kws[\"binwidth\"] is None\n and not estimate_kws[\"discrete\"]\n )\n if auto_bins_with_weights:\n msg = (\n \"`bins` 
cannot be 'auto' when using weights. \"\n \"Setting `bins=10`, but you will likely want to adjust.\"\n )\n warnings.warn(msg, UserWarning)\n estimate_kws[\"bins\"] = 10\n\n # Simplify downstream code if we are not normalizing\n if estimate_kws[\"stat\"] == \"count\":\n common_norm = False\n\n orient = self.data_variable\n\n # Now initialize the Histogram estimator\n estimator = Hist(**estimate_kws)\n histograms = {}\n\n # Do pre-compute housekeeping related to multiple groups\n all_data = self.comp_data.dropna()\n all_weights = all_data.get(\"weights\", None)\n\n multiple_histograms = set(self.variables) - {\"x\", \"y\"}\n if multiple_histograms:\n if common_bins:\n bin_kws = estimator._define_bin_params(all_data, orient, None)\n else:\n common_norm = False\n\n if common_norm and all_weights is not None:\n whole_weight = all_weights.sum()\n else:\n whole_weight = len(all_data)\n\n # Estimate the smoothed kernel densities, for use later\n if kde:\n # TODO alternatively, clip at min/max bins?\n kde_kws.setdefault(\"cut\", 0)\n kde_kws[\"cumulative\"] = estimate_kws[\"cumulative\"]\n log_scale = self._log_scaled(self.data_variable)\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_bins,\n kde_kws,\n log_scale,\n warn_singular=False,\n )\n\n # First pass through the data to compute the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Prepare the relevant data\n key = tuple(sub_vars.items())\n orient = self.data_variable\n\n if \"weights\" in self.variables:\n sub_data[\"weight\"] = sub_data.pop(\"weights\")\n part_weight = sub_data[\"weight\"].sum()\n else:\n part_weight = len(sub_data)\n\n # Do the histogram computation\n if not (multiple_histograms and common_bins):\n bin_kws = estimator._define_bin_params(sub_data, orient, None)\n res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n heights = res[estimator.stat].to_numpy()\n widths = res[\"space\"].to_numpy()\n edges = res[orient].to_numpy() - widths / 2\n\n # Rescale the smoothed curve to match the histogram\n if kde and key in densities:\n density = densities[key]\n if estimator.cumulative:\n hist_norm = heights.max()\n else:\n hist_norm = (heights * widths).sum()\n densities[key] *= hist_norm\n\n # Convert edges back to original units for plotting\n if self._log_scaled(self.data_variable):\n widths = np.power(10, edges + widths) - np.power(10, edges)\n edges = np.power(10, edges)\n\n # Pack the histogram data and metadata together\n edges = edges + (1 - shrink) / 2 * widths\n widths *= shrink\n index = pd.MultiIndex.from_arrays([\n pd.Index(edges, name=\"edges\"),\n pd.Index(widths, name=\"widths\"),\n ])\n hist = pd.Series(heights, index=index, name=\"heights\")\n\n # Apply scaling to normalize across groups\n if common_norm:\n hist *= part_weight / whole_weight\n\n # Store the finalized histogram data for future plotting\n histograms[key] = hist\n\n # Modify the histogram and density data to resolve multiple groups\n histograms, baselines = self._resolve_multiple(histograms, multiple)\n if kde:\n densities, _ = self._resolve_multiple(\n densities, None if multiple == \"dodge\" else multiple\n )\n\n # Set autoscaling-related meta\n sticky_stat = (0, 1) if multiple == \"fill\" else (0, np.inf)\n if multiple == \"fill\":\n # Filled plots should not have any margins\n bin_vals = histograms.index.to_frame()\n edges = bin_vals[\"edges\"]\n widths = bin_vals[\"widths\"]\n sticky_data = (\n edges.min(),\n edges.max() + 
widths.loc[edges.idxmax()]\n )\n else:\n sticky_data = []\n\n # --- Handle default visual attributes\n\n # Note: default linewidth is determined after plotting\n\n # Default alpha should depend on other parameters\n if fill:\n # Note: will need to account for other grouping semantics if added\n if \"hue\" in self.variables and multiple == \"layer\":\n default_alpha = .5 if element == \"bars\" else .25\n elif kde:\n default_alpha = .5\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n hist_artists = []\n\n # Go back through the dataset and draw the plots\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n key = tuple(sub_vars.items())\n hist = histograms[key].rename(\"heights\").reset_index()\n bottom = np.asarray(baselines[key])\n\n ax = self._get_axes(sub_vars)\n\n # Define the matplotlib attributes that depend on semantic mapping\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, element, multiple, sub_color, alpha\n )\n\n if element == \"bars\":\n\n # Use matplotlib bar plotting\n\n plot_func = ax.bar if self.data_variable == \"x\" else ax.barh\n artists = plot_func(\n hist[\"edges\"],\n hist[\"heights\"] - bottom,\n hist[\"widths\"],\n bottom,\n align=\"edge\",\n **artist_kws,\n )\n\n for bar in artists:\n if self.data_variable == \"x\":\n bar.sticky_edges.x[:] = sticky_data\n bar.sticky_edges.y[:] = sticky_stat\n else:\n bar.sticky_edges.x[:] = sticky_stat\n bar.sticky_edges.y[:] = sticky_data\n\n hist_artists.extend(artists)\n\n else:\n\n # Use either fill_between or plot to draw hull of histogram\n if element == \"step\":\n\n final = hist.iloc[-1]\n x = np.append(hist[\"edges\"], final[\"edges\"] + final[\"widths\"])\n y = np.append(hist[\"heights\"], final[\"heights\"])\n b = np.append(bottom, bottom[-1])\n\n if self.data_variable == \"x\":\n step = \"post\"\n drawstyle = \"steps-post\"\n else:\n step = \"post\" # fillbetweenx handles mapping internally\n drawstyle = \"steps-pre\"\n\n elif element == \"poly\":\n\n x = hist[\"edges\"] + hist[\"widths\"] / 2\n y = hist[\"heights\"]\n b = bottom\n\n step = None\n drawstyle = None\n\n if self.data_variable == \"x\":\n if fill:\n artist = ax.fill_between(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(x, y, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_data\n artist.sticky_edges.y[:] = sticky_stat\n else:\n if fill:\n artist = ax.fill_betweenx(x, b, y, step=step, **artist_kws)\n else:\n artist, = ax.plot(y, x, drawstyle=drawstyle, **artist_kws)\n artist.sticky_edges.x[:] = sticky_stat\n artist.sticky_edges.y[:] = sticky_data\n\n hist_artists.append(artist)\n\n if kde:\n\n # Add in the density curves\n\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n\n if \"x\" in self.variables:\n line_args = support, density\n sticky_x, sticky_y = None, (0, np.inf)\n else:\n line_args = density, support\n sticky_x, sticky_y = (0, np.inf), None\n\n line_kws[\"color\"] = to_rgba(sub_color, 1)\n line, = ax.plot(\n *line_args, **line_kws,\n )\n\n if sticky_x is not None:\n line.sticky_edges.x[:] = sticky_x\n if sticky_y is not None:\n line.sticky_edges.y[:] = sticky_y\n\n if element == \"bars\" and \"linewidth\" not in plot_kws:\n\n # Now we handle linewidth, which depends on the scaling of the plot\n\n # We will base everything on the minimum bin width\n hist_metadata = 
pd.concat([\n # Use .items for generality over dict or df\n h.index.to_frame() for _, h in histograms.items()\n ]).reset_index(drop=True)\n thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n binwidth = hist_metadata.loc[thin_bar_idx, \"widths\"]\n left_edge = hist_metadata.loc[thin_bar_idx, \"edges\"]\n\n # Set initial value\n default_linewidth = math.inf\n\n # Loop through subsets based only on facet variables\n for sub_vars, _ in self.iter_data():\n\n ax = self._get_axes(sub_vars)\n\n # Needed in some cases to get valid transforms.\n # Innocuous in other cases?\n ax.autoscale_view()\n\n # Convert binwidth from data coordinates to pixels\n pts_x, pts_y = 72 / ax.figure.dpi * abs(\n ax.transData.transform([left_edge + binwidth] * 2)\n - ax.transData.transform([left_edge] * 2)\n )\n if self.data_variable == \"x\":\n binwidth_points = pts_x\n else:\n binwidth_points = pts_y\n\n # The relative size of the lines depends on the appearance\n # This is a provisional value and may need more tweaking\n default_linewidth = min(.1 * binwidth_points, default_linewidth)\n\n # Set the attributes\n for bar in hist_artists:\n\n # Don't let the lines get too thick\n max_linewidth = bar.get_linewidth()\n if not fill:\n max_linewidth *= 1.5\n\n linewidth = min(default_linewidth, max_linewidth)\n\n # If not filling, don't let lines disappear\n if not fill:\n min_linewidth = .5\n linewidth = max(linewidth, min_linewidth)\n\n bar.set_linewidth(linewidth)\n\n # --- Finalize the plot ----\n\n # Axis labels\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = estimator.stat.capitalize()\n if self.data_variable == \"y\":\n default_x = estimator.stat.capitalize()\n self._add_axis_labels(ax, default_x, default_y)\n\n # Legend for semantic variables\n if \"hue\" in self.variables and legend:\n\n if fill or element == \"bars\":\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, element, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_histogram(\n self,\n common_bins, common_norm,\n thresh, pthresh, pmax,\n color, legend,\n cbar, cbar_ax, cbar_kws,\n estimate_kws,\n **plot_kws,\n ):\n\n # Default keyword dicts\n cbar_kws = {} if cbar_kws is None else cbar_kws.copy()\n\n # Now initialize the Histogram estimator\n estimator = Histogram(**estimate_kws)\n\n # Do pre-compute housekeeping related to multiple groups\n if set(self.variables) - {\"x\", \"y\"}:\n all_data = self.comp_data.dropna()\n if common_bins:\n estimator.define_bin_params(\n all_data[\"x\"],\n all_data[\"y\"],\n all_data.get(\"weights\", None),\n )\n else:\n common_norm = False\n\n # -- Determine colormap threshold and norm based on the full data\n\n full_heights = []\n for _, sub_data in self.iter_data(from_comp_data=True):\n sub_heights, _ = estimator(\n sub_data[\"x\"], sub_data[\"y\"], sub_data.get(\"weights\", None)\n )\n full_heights.append(sub_heights)\n\n common_color_norm = not set(self.variables) - {\"x\", \"y\"} or common_norm\n\n if pthresh is not None and common_color_norm:\n thresh = self._quantile_to_level(full_heights, pthresh)\n\n plot_kws.setdefault(\"vmin\", 0)\n if common_color_norm:\n if pmax is not None:\n vmax = self._quantile_to_level(full_heights, pmax)\n else:\n vmax = plot_kws.pop(\"vmax\", max(map(np.max, full_heights)))\n else:\n vmax = None\n\n # Get a default color\n # (We won't 
follow the color cycle here, as multiple plots are unlikely)\n if color is None:\n color = \"C0\"\n\n # --- Loop over data (subsets) and draw the histograms\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n if sub_data.empty:\n continue\n\n # Do the histogram computation\n heights, (x_edges, y_edges) = estimator(\n sub_data[\"x\"],\n sub_data[\"y\"],\n weights=sub_data.get(\"weights\", None),\n )\n\n # Check for log scaling on the data axis\n if self._log_scaled(\"x\"):\n x_edges = np.power(10, x_edges)\n if self._log_scaled(\"y\"):\n y_edges = np.power(10, y_edges)\n\n # Apply scaling to normalize across groups\n if estimator.stat != \"count\" and common_norm:\n heights *= len(sub_data) / len(all_data)\n\n # Define the specific kwargs for this artist\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n color = self._hue_map(sub_vars[\"hue\"])\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n else:\n cmap = artist_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n elif cmap is None:\n cmap = self._cmap_from_color(color)\n artist_kws[\"cmap\"] = cmap\n\n # Set the upper norm on the colormap\n if not common_color_norm and pmax is not None:\n vmax = self._quantile_to_level(heights, pmax)\n if vmax is not None:\n artist_kws[\"vmax\"] = vmax\n\n # Make cells at or below the threshold transparent\n if not common_color_norm and pthresh:\n thresh = self._quantile_to_level(heights, pthresh)\n if thresh is not None:\n heights = np.ma.masked_less_equal(heights, thresh)\n\n # Get the axes for this plot\n ax = self._get_axes(sub_vars)\n\n # pcolormesh is going to turn the grid off, but we want to keep it\n # I'm not sure if there's a better way to get the grid state\n x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n\n mesh = ax.pcolormesh(\n x_edges,\n y_edges,\n heights.T,\n **artist_kws,\n )\n\n # pcolormesh sets sticky edges, but we only want them if not thresholding\n if thresh is not None:\n mesh.sticky_edges.x[:] = []\n mesh.sticky_edges.y[:] = []\n\n # Add an optional colorbar\n # Note, we want to improve this. 
When hue is used, it will stack\n # multiple colorbars with redundant ticks in an ugly way.\n # But it's going to take some work to have multiple colorbars that\n # share ticks nicely.\n if cbar:\n ax.figure.colorbar(mesh, cbar_ax, ax, **cbar_kws)\n\n # Reset the grid state\n if x_grid:\n ax.grid(True, axis=\"x\")\n if y_grid:\n ax.grid(True, axis=\"y\")\n\n # --- Finalize the plot\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n artist = partial(mpl.patches.Patch)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, True, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. 
we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a 
ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density 
values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n 
if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 402, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\", \"dodge\"], multiple)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 403, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"element\", [\"bars\", \"step\", \"poly\"], element)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 426, "name": "Hist", "kind": "ref", "category": "function", "info": " estimator = Hist(**estimate_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 430, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.comp_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 436, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = estimator._define_bin_params(all_data, orient, None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 450, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 451, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 461, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 475, "name": "_define_bin_params", "kind": "ref", "category": "function", "info": " bin_kws = estimator._define_bin_params(sub_data, orient, None)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 476, "name": "_normalize", "kind": "ref", "category": "function", "info": " res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 476, "name": "_eval", "kind": "ref", "category": "function", "info": " res = estimator._normalize(estimator._eval(sub_data, orient, bin_kws))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 477, "name": "to_numpy", "kind": "ref", "category": "function", "info": " heights = res[estimator.stat].to_numpy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 478, "name": "to_numpy", "kind": "ref", "category": "function", "info": " widths = res[\"space\"].to_numpy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 479, "name": "to_numpy", "kind": "ref", "category": "function", "info": " edges = res[orient].to_numpy() - widths / 2\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 491, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 498, "name": "from_arrays", "kind": "ref", "category": "function", "info": " index = pd.MultiIndex.from_arrays([\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 512, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " histograms, baselines = self._resolve_multiple(histograms, multiple)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 514, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, _ = self._resolve_multiple(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 522, "name": "to_frame", "kind": "ref", "category": "function", "info": " bin_vals = histograms.index.to_frame()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 527, "name": "idxmax", "kind": "ref", "category": "function", "info": " edges.max() + widths.loc[edges.idxmax()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 552, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 555, "name": "rename", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 555, "name": "reset_index", "kind": "ref", "category": "function", "info": " hist = histograms[key].rename(\"heights\").reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 558, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 562, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 566, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 575, "name": "plot_func", "kind": "ref", "category": "function", "info": " artists = plot_func(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 671, "name": "to_frame", "kind": "ref", "category": "function", "info": " h.index.to_frame() for _, h in histograms.items()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 672, "name": "reset_index", "kind": "ref", "category": "function", "info": " ]).reset_index(drop=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 673, "name": "idxmin", "kind": "ref", "category": "function", "info": " thin_bar_idx = hist_metadata[\"widths\"].idxmin()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 681, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 683, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 687, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 691, "name": "transform", "kind": "ref", "category": "function", "info": " ax.transData.transform([left_edge + binwidth] * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 692, "name": "transform", "kind": "ref", "category": "function", "info": " - ax.transData.transform([left_edge] * 2)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 729, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, 
default_y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 740, "name": "_add_legend", "kind": "ref", "category": "function", "info": "        self._add_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 744, "name": "plot_bivariate_histogram", "kind": "def", "category": "function", "info": "    def plot_bivariate_histogram(\n        self,\n        common_bins, common_norm,\n        thresh, pthresh, pmax,\n        color, legend,\n        cbar, cbar_ax, cbar_kws,\n        estimate_kws,\n        **plot_kws,\n    ):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 758, "name": "Histogram", "kind": "ref", "category": "function", "info": "        estimator = Histogram(**estimate_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 762, "name": "dropna", "kind": "ref", "category": "function", "info": "            all_data = self.comp_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 764, "name": "define_bin_params", "kind": "ref", "category": "function", "info": "                estimator.define_bin_params(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 775, "name": "iter_data", "kind": "ref", "category": "function", "info": "        for _, sub_data in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 776, "name": "estimator", "kind": "ref", "category": "function", "info": "            sub_heights, _ = estimator(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 784, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": "            thresh = self._quantile_to_level(full_heights, pthresh)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 789, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": "                vmax = self._quantile_to_level(full_heights, pmax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 801, "name": "iter_data", "kind": "ref", "category": "function", "info": "        for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 807, "name": "estimator", "kind": "ref", "category": "function", "info": "            heights, (x_edges, y_edges) = estimator(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 814, "name": "_log_scaled", "kind": "ref", "category": "function", "info": "            if self._log_scaled(\"x\"):\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 816, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 826, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 827, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 832, "name": "color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 834, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 839, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " vmax = self._quantile_to_level(heights, pmax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 845, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " thresh = self._quantile_to_level(heights, pthresh)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 847, "name": "masked_less_equal", "kind": "ref", "category": "function", "info": " heights = np.ma.masked_less_equal(heights, thresh)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 850, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 854, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " x_grid = any([l.get_visible() for l in ax.xaxis.get_gridlines()])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 855, "name": "get_gridlines", "kind": "ref", "category": "function", "info": " y_grid = any([l.get_visible() for l in ax.yaxis.get_gridlines()])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 886, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 897, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 901, "name": "plot_univariate_density", "kind": "def", "category": "function", "info": " def plot_univariate_density(\n self,\n multiple,\n common_norm,\n common_grid,\n warn_singular,\n fill,\n color,\n legend,\n estimate_kws,\n **plot_kws,\n ):\n\n # Handle conditional defaults\n if fill is None:\n fill = multiple in (\"stack\", \"fill\")\n\n # Preprocess the matplotlib keyword dictionaries\n if fill:\n artist = mpl.collections.PolyCollection\n else:\n artist = mpl.lines.Line2D\n plot_kws = _normalize_kwargs(plot_kws, artist)\n\n # Input checking\n _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n\n # Always share the evaluation grid when stacking\n subsets = bool(set(self.variables) - {\"x\", \"y\"})\n if subsets and multiple in (\"stack\", \"fill\"):\n common_grid = True\n\n # Check if the data axis is log scaled\n log_scale = self._log_scaled(self.data_variable)\n\n # Do the computation\n densities = self._compute_univariate_density(\n self.data_variable,\n common_norm,\n common_grid,\n estimate_kws,\n log_scale,\n warn_singular,\n )\n\n # Adjust densities based on the `multiple` rule\n densities, baselines = self._resolve_multiple(densities, multiple)\n\n # Control the interaction with autoscaling by defining sticky_edges\n # i.e. we don't want autoscale margins below the density curve\n sticky_density = (0, 1) if multiple == \"fill\" else (0, np.inf)\n\n if multiple == \"fill\":\n # Filled plots should not have any margins\n sticky_support = densities.index.min(), densities.index.max()\n else:\n sticky_support = []\n\n if fill:\n if multiple == \"layer\":\n default_alpha = .25\n else:\n default_alpha = .75\n else:\n default_alpha = 1\n alpha = plot_kws.pop(\"alpha\", default_alpha) # TODO make parameter?\n\n # Now iterate through the subsets and draw the densities\n # We go backwards so stacked densities read from top-to-bottom\n for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n\n # Extract the support grid and density curve for this level\n key = tuple(sub_vars.items())\n try:\n density = densities[key]\n except KeyError:\n continue\n support = density.index\n fill_from = baselines[key]\n\n ax = self._get_axes(sub_vars)\n\n if \"hue\" in self.variables:\n sub_color = self._hue_map(sub_vars[\"hue\"])\n else:\n sub_color = color\n\n artist_kws = self._artist_kws(\n plot_kws, fill, False, multiple, sub_color, alpha\n )\n\n # Either plot a curve with observation values on the x axis\n if \"x\" in self.variables:\n\n if fill:\n artist = ax.fill_between(support, fill_from, density, **artist_kws)\n\n else:\n artist, = ax.plot(support, density, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_support\n artist.sticky_edges.y[:] = sticky_density\n\n # Or plot a curve with observation values on the y axis\n else:\n if fill:\n artist = ax.fill_betweenx(support, fill_from, density, **artist_kws)\n else:\n artist, = ax.plot(density, support, **artist_kws)\n\n artist.sticky_edges.x[:] = sticky_density\n artist.sticky_edges.y[:] = sticky_support\n\n # --- Finalize the plot ----\n\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = \"Density\"\n if self.data_variable == \"y\":\n default_x = \"Density\"\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n\n if fill:\n artist = 
partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, multiple, alpha, plot_kws, {},\n )\n\n def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). \"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ 
in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and 
legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 923, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " plot_kws = _normalize_kwargs(plot_kws, artist)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 926, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"multiple\", [\"layer\", \"stack\", \"fill\"], multiple)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 934, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " log_scale = self._log_scaled(self.data_variable)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 937, "name": "_compute_univariate_density", "kind": "ref", "category": "function", "info": " densities = self._compute_univariate_density(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": 
"seaborn/distributions.py", "line": 947, "name": "_resolve_multiple", "kind": "ref", "category": "function", "info": " densities, baselines = self._resolve_multiple(densities, multiple)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 970, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\", reverse=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 981, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 984, "name": "_hue_map", "kind": "ref", "category": "function", "info": " sub_color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 988, "name": "_artist_kws", "kind": "ref", "category": "function", "info": " artist_kws = self._artist_kws(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1022, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1032, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1036, "name": "plot_bivariate_density", "kind": "def", "category": "function", "info": " def plot_bivariate_density(\n self,\n common_norm,\n fill,\n levels,\n thresh,\n color,\n legend,\n cbar,\n warn_singular,\n cbar_ax,\n cbar_kws,\n estimate_kws,\n **contour_kws,\n ):\n\n contour_kws = contour_kws.copy()\n\n estimator = KDE(**estimate_kws)\n\n if not set(self.variables) - {\"x\", \"y\"}:\n common_norm = False\n\n all_data = self.plot_data.dropna()\n\n # Loop through the subsets and estimate the KDEs\n densities, supports = {}, {}\n\n for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n\n # Extract the data points from this sub set\n observations = sub_data[[\"x\", \"y\"]]\n min_variance = observations.var().fillna(0).min()\n observations = observations[\"x\"], observations[\"y\"]\n\n # Extract the weights for this subset of observations\n if \"weights\" in self.variables:\n weights = sub_data[\"weights\"]\n else:\n weights = None\n\n # Estimate the density of observations at this level\n singular = math.isclose(min_variance, 0)\n try:\n if not singular:\n density, support = estimator(*observations, weights=weights)\n except np.linalg.LinAlgError:\n # Testing for 0 variance doesn't catch all cases where scipy raises,\n # but we can also get a ValueError, so we need this convoluted approach\n singular = True\n\n if singular:\n msg = (\n \"KDE cannot be estimated (0 variance or perfect covariance). 
\"\n \"Pass `warn_singular=False` to disable this warning.\"\n )\n if warn_singular:\n warnings.warn(msg, UserWarning, stacklevel=3)\n continue\n\n # Transform the support grid back to the original scale\n xx, yy = support\n if self._log_scaled(\"x\"):\n xx = np.power(10, xx)\n if self._log_scaled(\"y\"):\n yy = np.power(10, yy)\n support = xx, yy\n\n # Apply a scaling factor so that the integral over all subsets is 1\n if common_norm:\n density *= len(sub_data) / len(all_data)\n\n key = tuple(sub_vars.items())\n densities[key] = density\n supports[key] = support\n\n # Define a grid of iso-proportion levels\n if thresh is None:\n thresh = 0\n if isinstance(levels, Number):\n levels = np.linspace(thresh, 1, levels)\n else:\n if min(levels) < 0 or max(levels) > 1:\n raise ValueError(\"levels must be in [0, 1]\")\n\n # Transform from iso-proportions to iso-densities\n if common_norm:\n common_levels = self._quantile_to_level(\n list(densities.values()), levels,\n )\n draw_levels = {k: common_levels for k in densities}\n else:\n draw_levels = {\n k: self._quantile_to_level(d, levels)\n for k, d in densities.items()\n }\n\n # Define the coloring of the contours\n if \"hue\" in self.variables:\n for param in [\"cmap\", \"colors\"]:\n if param in contour_kws:\n msg = f\"{param} parameter ignored when using hue mapping.\"\n warnings.warn(msg, UserWarning)\n contour_kws.pop(param)\n else:\n\n # Work out a default coloring of the contours\n coloring_given = set(contour_kws) & {\"cmap\", \"colors\"}\n if fill and not coloring_given:\n cmap = self._cmap_from_color(color)\n contour_kws[\"cmap\"] = cmap\n if not fill and not coloring_given:\n contour_kws[\"colors\"] = [color]\n\n # Use our internal colormap lookup\n cmap = contour_kws.pop(\"cmap\", None)\n if isinstance(cmap, str):\n cmap = color_palette(cmap, as_cmap=True)\n if cmap is not None:\n contour_kws[\"cmap\"] = cmap\n\n # Loop through the subsets again and plot the data\n for sub_vars, _ in self.iter_data(\"hue\"):\n\n if \"hue\" in sub_vars:\n color = self._hue_map(sub_vars[\"hue\"])\n if fill:\n contour_kws[\"cmap\"] = self._cmap_from_color(color)\n else:\n contour_kws[\"colors\"] = [color]\n\n ax = self._get_axes(sub_vars)\n\n # Choose the function to plot with\n # TODO could add a pcolormesh based option as well\n # Which would look something like element=\"raster\"\n if fill:\n contour_func = ax.contourf\n else:\n contour_func = ax.contour\n\n key = tuple(sub_vars.items())\n if key not in densities:\n continue\n density = densities[key]\n xx, yy = supports[key]\n\n label = contour_kws.pop(\"label\", None)\n\n cset = contour_func(\n xx, yy, density,\n levels=draw_levels[key],\n **contour_kws,\n )\n\n if \"hue\" not in self.variables:\n cset.collections[0].set_label(label)\n\n # Add a color bar representing the contour heights\n # Note: this shows iso densities, not iso proportions\n # See more notes in histplot about how this could be improved\n if cbar:\n cbar_kws = {} if cbar_kws is None else cbar_kws\n ax.figure.colorbar(cset, cbar_ax, ax, **cbar_kws)\n\n # --- Finalize the plot\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n self._add_axis_labels(ax)\n\n if \"hue\" in self.variables and legend:\n\n # TODO if possible, I would like to move the contour\n # intensity information into the legend too and label the\n # iso proportions rather than the raw density values\n\n artist_kws = {}\n if fill:\n artist = partial(mpl.patches.Patch)\n else:\n artist = partial(mpl.lines.Line2D, [], [])\n\n ax_obj = self.ax if 
self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, fill, False, \"layer\", 1, artist_kws, {},\n )\n\n def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, **artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n 
colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1054, "name": "KDE", "kind": "ref", "category": "function", "info": " estimator = KDE(**estimate_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1059, "name": "dropna", "kind": "ref", "category": "function", "info": " all_data = self.plot_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1064, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\"hue\", from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1068, "name": "fillna", "kind": "ref", "category": "function", "info": " min_variance = observations.var().fillna(0).min()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1081, "name": "estimator", "kind": "ref", "category": "function", "info": " density, support = estimator(*observations, weights=weights)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1098, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"x\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1100, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(\"y\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1123, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " common_levels = self._quantile_to_level(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1129, "name": "_quantile_to_level", "kind": "ref", "category": "function", "info": " k: self._quantile_to_level(d, levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1145, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " cmap = self._cmap_from_color(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1153, "name": 
"color_palette", "kind": "ref", "category": "function", "info": " cmap = color_palette(cmap, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1158, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, _ in self.iter_data(\"hue\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1161, "name": "_hue_map", "kind": "ref", "category": "function", "info": " color = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1163, "name": "_cmap_from_color", "kind": "ref", "category": "function", "info": " contour_kws[\"cmap\"] = self._cmap_from_color(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1167, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1185, "name": "contour_func", "kind": "ref", "category": "function", "info": " cset = contour_func(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1203, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1218, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1222, "name": "plot_univariate_ecdf", "kind": "def", "category": "function", "info": " def plot_univariate_ecdf(self, estimate_kws, legend, **plot_kws):\n\n estimator = ECDF(**estimate_kws)\n\n # Set the draw style to step the right way for the data variable\n drawstyles = dict(x=\"steps-post\", y=\"steps-pre\")\n plot_kws[\"drawstyle\"] = drawstyles[self.data_variable]\n\n # Loop through the subsets, transform and plot the data\n for sub_vars, sub_data in self.iter_data(\n \"hue\", reverse=True, from_comp_data=True,\n ):\n\n # Compute the ECDF\n if sub_data.empty:\n continue\n\n observations = sub_data[self.data_variable]\n weights = sub_data.get(\"weights\", None)\n stat, vals = estimator(observations, weights=weights)\n\n # Assign attributes based on semantic mapping\n artist_kws = plot_kws.copy()\n if \"hue\" in self.variables:\n artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n\n # Return the data variable to the linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(self.data_variable):\n vals = np.power(10, vals)\n vals[0] = -np.inf\n\n # Work out the orientation of the plot\n if self.data_variable == \"x\":\n plot_args = vals, stat\n stat_variable = \"y\"\n else:\n plot_args = stat, vals\n stat_variable = \"x\"\n\n if estimator.stat == \"count\":\n top_edge = len(observations)\n else:\n top_edge = 1\n\n # Draw the line for this subset\n ax = self._get_axes(sub_vars)\n artist, = ax.plot(*plot_args, 
**artist_kws)\n sticky_edges = getattr(artist.sticky_edges, stat_variable)\n sticky_edges[:] = 0, top_edge\n\n # --- Finalize the plot ----\n ax = self.ax if self.ax is not None else self.facets.axes.flat[0]\n stat = estimator.stat.capitalize()\n default_x = default_y = \"\"\n if self.data_variable == \"x\":\n default_y = stat\n if self.data_variable == \"y\":\n default_x = stat\n self._add_axis_labels(ax, default_x, default_y)\n\n if \"hue\" in self.variables and legend:\n artist = partial(mpl.lines.Line2D, [], [])\n alpha = plot_kws.get(\"alpha\", 1)\n ax_obj = self.ax if self.ax is not None else self.facets\n self._add_legend(\n ax_obj, artist, False, False, None, alpha, plot_kws, {},\n )\n\n def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1224, "name": "ECDF", "kind": "ref", "category": "function", "info": " estimator = ECDF(**estimate_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1231, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1241, "name": "estimator", "kind": "ref", "category": "function", "info": " stat, vals = estimator(observations, weights=weights)\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1246, "name": "_hue_map", "kind": "ref", "category": "function", "info": " artist_kws[\"color\"] = self._hue_map(sub_vars[\"hue\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1250, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(self.data_variable):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1268, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1281, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax, default_x, default_y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1287, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1291, "name": "plot_rug", "kind": "def", "category": "function", "info": " def plot_rug(self, height, expand_margins, legend, **kws):\n\n for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n\n ax = self._get_axes(sub_vars)\n\n kws.setdefault(\"linewidth\", 1)\n\n if expand_margins:\n xmarg, ymarg = ax.margins()\n if \"x\" in self.variables:\n ymarg += height * 2\n if \"y\" in self.variables:\n xmarg += height * 2\n ax.margins(x=xmarg, y=ymarg)\n\n if \"hue\" in self.variables:\n kws.pop(\"c\", None)\n kws.pop(\"color\", None)\n\n if \"x\" in self.variables:\n self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n if \"y\" in self.variables:\n self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n\n # --- Finalize the plot\n self._add_axis_labels(ax)\n if \"hue\" in self.variables and legend:\n # TODO ideally i'd like the legend artist to look like a rug\n legend_artist = partial(mpl.lines.Line2D, [], [])\n self._add_legend(\n ax, legend_artist, False, False, None, 1, {}, {},\n )\n\n def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == 
\"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1293, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data, in self.iter_data(from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1295, "name": "_get_axes", "kind": "ref", "category": "function", "info": " ax = self._get_axes(sub_vars)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1312, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"x\", height, ax, kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1314, "name": "_plot_single_rug", "kind": "ref", "category": "function", "info": " self._plot_single_rug(sub_data, \"y\", height, ax, kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1317, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1321, "name": "_add_legend", "kind": "ref", "category": "function", "info": " self._add_legend(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1325, "name": "_plot_single_rug", "kind": "def", "category": "function", "info": " def _plot_single_rug(self, sub_data, var, height, ax, kws):\n \"\"\"Draw a rugplot along one axis of the plot.\"\"\"\n vector = sub_data[var]\n n = len(vector)\n\n # Return data to linear domain\n # This needs an automatic solution; see GH2409\n if self._log_scaled(var):\n vector = np.power(10, vector)\n\n # We'll always add a single collection with varying colors\n if \"hue\" in self.variables:\n colors = self._hue_map(sub_data[\"hue\"])\n else:\n colors = None\n\n # Build the array of values for the LineCollection\n if var == \"x\":\n\n trans = tx.blended_transform_factory(ax.transData, ax.transAxes)\n xy_pairs = np.column_stack([\n np.repeat(vector, 2), np.tile([0, height], n)\n ])\n\n if var == \"y\":\n\n trans = tx.blended_transform_factory(ax.transAxes, ax.transData)\n xy_pairs = np.column_stack([\n np.tile([0, height], n), np.repeat(vector, 2)\n ])\n\n # Draw the lines on the plot\n line_segs = xy_pairs.reshape([n, 2, 2])\n ax.add_collection(LineCollection(\n line_segs, transform=trans, colors=colors, **kws\n ))\n\n ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1332, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1337, "name": "_hue_map", "kind": "ref", "category": "function", "info": " colors = self._hue_map(sub_data[\"hue\"])\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1358, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(LineCollection(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1362, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scalex=var == \"x\", scaley=var == \"y\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1365, "name": "_DistributionFacetPlotter", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1374, "name": "histplot", "kind": "def", "category": "function", "info": "def histplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Histogram computation parameters\n stat=\"count\", bins=\"auto\", binwidth=None, binrange=None,\n discrete=None, cumulative=False, common_bins=True, common_norm=True,\n # Histogram appearance parameters\n multiple=\"layer\", element=\"bars\", fill=True, shrink=1,\n # Histogram smoothing with a kernel density estimate\n kde=False, kde_kws=None, line_kws=None,\n # Bivariate histogram parameters\n thresh=0, pthresh=None, pmax=None, cbar=False, cbar_ax=None, cbar_kws=None,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1395, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1397, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1400, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1405, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1412, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1419, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " discrete = p._default_discrete()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1432, "name": 
"plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1450, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1597, "name": "kdeplot", "kind": "def", "category": "function", "info": "def kdeplot(\n data=None, *, x=None, y=None, hue=None, weights=None,\n palette=None, hue_order=None, hue_norm=None, color=None, fill=None,\n multiple=\"layer\", common_norm=True, common_grid=False, cumulative=False,\n bw_method=\"scott\", bw_adjust=1, warn_singular=True, log_scale=None,\n levels=10, thresh=.05, gridsize=200, cut=3, clip=None,\n legend=True, cbar=False, cbar_ax=None, cbar_kws=None, ax=None,\n **kwargs,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1685, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1687, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1690, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1695, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, allowed_types=[\"numeric\", \"datetime\"], log_scale=log_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1698, "name": "_default_color", "kind": "ref", "category": "function", "info": " color = _default_color(method, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1717, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1731, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1877, "name": "ecdfplot", "kind": "def", "category": "function", "info": "def ecdfplot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, weights=None,\n # Computation parameters\n stat=\"proportion\", complementary=False,\n # Hue mapping parameters\n palette=None, hue_order=None, hue_norm=None,\n # Axes information\n log_scale=None, legend=True, ax=None,\n # Other appearance keywords\n **kwargs,\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1891, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1893, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1896, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1907, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax, log_scale=log_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1910, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1924, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 1989, "name": "rugplot", "kind": "def", "category": "function", "info": "def rugplot(\n data=None, *, x=None, y=None, hue=None, height=.025, expand_margins=True,\n palette=None, hue_order=None, hue_norm=None, legend=True, ax=None, **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2047, "name": "_DistributionPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2049, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionPlotter.get_semantics(locals()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2051, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2056, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2059, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2064, "name": "plot_rug", "kind": "ref", 
"category": "function", "info": " p.plot_rug(height, expand_margins, legend, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2111, "name": "displot", "kind": "def", "category": "function", "info": "def displot(\n data=None, *,\n # Vector variables\n x=None, y=None, hue=None, row=None, col=None, weights=None,\n # Other plot parameters\n kind=\"hist\", rug=False, rug_kws=None, log_scale=None, legend=True,\n # Hue-mapping parameters\n palette=None, hue_order=None, hue_norm=None, color=None,\n # Faceting parameters\n col_wrap=None, row_order=None, col_order=None,\n height=5, aspect=1, facet_kws=None,\n **kwargs,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2125, "name": "_DistributionFacetPlotter", "kind": "ref", "category": "function", "info": " p = _DistributionFacetPlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2127, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=_DistributionFacetPlotter.get_semantics(locals())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2130, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2132, "name": "_check_argument", "kind": "ref", "category": "function", "info": " _check_argument(\"kind\", [\"hist\", \"kde\", \"ecdf\"], kind)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2151, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = p.plot_data.rename(columns=p.variables)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2152, "name": "duplicated", "kind": "ref", "category": "function", "info": " grid_data = grid_data.loc[:, ~grid_data.columns.duplicated()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2160, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2173, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(g, allowed_types=allowed_types, log_scale=log_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2193, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, Histogram.__init__, histplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2201, "name": "_default_discrete", "kind": "ref", "category": "function", "info": " estimate_kws[\"discrete\"] = p._default_discrete()\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2209, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_univariate_histogram, histplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2210, "name": "plot_univariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_univariate_histogram(**hist_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2214, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(hist_kws, p.plot_bivariate_histogram, histplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2215, "name": "plot_bivariate_histogram", "kind": "ref", "category": "function", "info": " p.plot_bivariate_histogram(**hist_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2223, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, KDE.__init__, kdeplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2234, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_univariate_density, kdeplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2235, "name": "plot_univariate_density", "kind": "ref", "category": "function", "info": " p.plot_univariate_density(**kde_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2239, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(kde_kws, p.plot_bivariate_density, kdeplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2240, "name": "plot_bivariate_density", "kind": "ref", "category": "function", "info": " p.plot_bivariate_density(**kde_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2249, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(estimate_defaults, ECDF.__init__, ecdfplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2258, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(ecdf_kws, p.plot_univariate_ecdf, ecdfplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2259, "name": "plot_univariate_ecdf", "kind": "ref", "category": "function", "info": " p.plot_univariate_ecdf(**ecdf_kws)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2270, "name": "_assign_default_kwargs", "kind": "ref", "category": "function", "info": " _assign_default_kwargs(rug_kws, p.plot_rug, rugplot)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2274, "name": "plot_rug", "kind": "ref", "category": "function", "info": " p.plot_rug(**rug_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2278, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2279, "name": "get_xlabel", "kind": "ref", "category": "function", "info": " x_var=p.variables.get(\"x\", g.axes.flat[0].get_xlabel()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2280, "name": "get_ylabel", "kind": "ref", "category": "function", "info": " y_var=p.variables.get(\"y\", g.axes.flat[0].get_ylabel()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2282, "name": "set_titles", "kind": "ref", "category": "function", "info": " g.set_titles()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2298, "name": "rename", "kind": "ref", "category": "function", "info": " g.data = p.plot_data.rename(columns=wide_cols)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2392, "name": "_freedman_diaconis_bins", "kind": "def", "category": "function", "info": "def _freedman_diaconis_bins(a):\n \"\"\"Calculate number of hist bins using Freedman-Diaconis rule.\"\"\"\n # From https://stats.stackexchange.com/questions/798/\n a = np.asarray(a)\n if len(a) < 2:\n return 1\n iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n h = 2 * iqr / (len(a) ** (1 / 3))\n # fall back to sqrt(a) bins if iqr is 0\n if h == 0:\n return int(np.sqrt(a.size))\n else:\n return int(np.ceil((a.max() - a.min()) / h))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2398, "name": "reduce", "kind": "ref", "category": "function", "info": " iqr = np.subtract.reduce(np.nanpercentile(a, [75, 25]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2407, "name": "distplot", "kind": "def", "category": "function", "info": "def distplot(a=None, bins=None, hist=True, kde=True, rug=False, fit=None,\n hist_kws=None, kde_kws=None, rug_kws=None, fit_kws=None,\n color=None, vertical=False, norm_hist=False, axlabel=None,\n label=None, ax=None, x=None):\n \"\"\"\n DEPRECATED\n\n This function has been deprecated and will be removed in seaborn v0.14.0.\n It has been replaced by :func:`histplot` and :func:`displot`, two functions\n with a modern API and many more capabilities.\n\n For a guide to updating, please see this notebook:\n\n 
https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n\n \"\"\"\n\n if kde and not hist:\n axes_level_suggestion = (\n \"`kdeplot` (an axes-level function for kernel density plots)\"\n )\n else:\n axes_level_suggestion = (\n \"`histplot` (an axes-level function for histograms)\"\n )\n\n msg = textwrap.dedent(f\"\"\"\n\n `distplot` is a deprecated function and will be removed in seaborn v0.14.0.\n\n Please adapt your code to use either `displot` (a figure-level function with\n similar flexibility) or {axes_level_suggestion}.\n\n For a guide to updating your code to use the new functions, please see\n https://gist.github.com/mwaskom/de44147ed2974457ad6372750bbe5751\n \"\"\")\n warnings.warn(msg, UserWarning, stacklevel=2)\n\n if ax is None:\n ax = plt.gca()\n\n # Intelligently label the support axis\n label_ax = bool(axlabel)\n if axlabel is None and hasattr(a, \"name\"):\n axlabel = a.name\n if axlabel is not None:\n label_ax = True\n\n # Support new-style API\n if x is not None:\n a = x\n\n # Make a a 1-d float array\n a = np.asarray(a, float)\n if a.ndim > 1:\n a = a.squeeze()\n\n # Drop null values from array\n a = remove_na(a)\n\n # Decide if the hist is normed\n norm_hist = norm_hist or kde or (fit is not None)\n\n # Handle dictionary defaults\n hist_kws = {} if hist_kws is None else hist_kws.copy()\n kde_kws = {} if kde_kws is None else kde_kws.copy()\n rug_kws = {} if rug_kws is None else rug_kws.copy()\n fit_kws = {} if fit_kws is None else fit_kws.copy()\n\n # Get the color from the current color cycle\n if color is None:\n if vertical:\n line, = ax.plot(0, a.mean())\n else:\n line, = ax.plot(a.mean(), 0)\n color = line.get_color()\n line.remove()\n\n # Plug the label into the right kwarg dictionary\n if label is not None:\n if hist:\n hist_kws[\"label\"] = label\n elif kde:\n kde_kws[\"label\"] = label\n elif rug:\n rug_kws[\"label\"] = label\n elif fit:\n fit_kws[\"label\"] = label\n\n if hist:\n if bins is None:\n bins = min(_freedman_diaconis_bins(a), 50)\n hist_kws.setdefault(\"alpha\", 0.4)\n hist_kws.setdefault(\"density\", norm_hist)\n\n orientation = \"horizontal\" if vertical else \"vertical\"\n hist_color = hist_kws.pop(\"color\", color)\n ax.hist(a, bins, orientation=orientation,\n color=hist_color, **hist_kws)\n if hist_color != color:\n hist_kws[\"color\"] = hist_color\n\n axis = \"y\" if vertical else \"x\"\n\n if kde:\n kde_color = kde_kws.pop(\"color\", color)\n kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n if kde_color != color:\n kde_kws[\"color\"] = kde_color\n\n if rug:\n rug_color = rug_kws.pop(\"color\", color)\n rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n if rug_color != color:\n rug_kws[\"color\"] = rug_color\n\n if fit is not None:\n\n def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2465, "name": "remove_na", "kind": "ref", "category": 
"function", "info": " a = remove_na(a)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2498, "name": "_freedman_diaconis_bins", "kind": "ref", "category": "function", "info": " bins = min(_freedman_diaconis_bins(a), 50)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2513, "name": "kdeplot", "kind": "ref", "category": "function", "info": " kdeplot(**{axis: a}, ax=ax, color=kde_color, **kde_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2519, "name": "rugplot", "kind": "ref", "category": "function", "info": " rugplot(**{axis: a}, ax=ax, color=rug_color, **rug_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2525, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(x):\n return fit.pdf(x, *params)\n\n fit_color = fit_kws.pop(\"color\", \"#282828\")\n gridsize = fit_kws.pop(\"gridsize\", 200)\n cut = fit_kws.pop(\"cut\", 3)\n clip = fit_kws.pop(\"clip\", (-np.inf, np.inf))\n bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n x = _kde_support(a, bw, gridsize, cut, clip)\n params = fit.fit(a)\n y = pdf(x)\n if vertical:\n x, y = y, x\n ax.plot(x, y, color=fit_color, **fit_kws)\n if fit_color != \"#282828\":\n fit_kws[\"color\"] = fit_color\n\n if label_ax:\n if vertical:\n ax.set_ylabel(axlabel)\n else:\n ax.set_xlabel(axlabel)\n\n return ax\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2526, "name": "pdf", "kind": "ref", "category": "function", "info": " return fit.pdf(x, *params)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2532, "name": "gaussian_kde", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2532, "name": "scotts_factor", "kind": "ref", "category": "function", "info": " bw = gaussian_kde(a).scotts_factor() * a.std(ddof=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2533, "name": "_kde_support", "kind": "ref", "category": "function", "info": " x = _kde_support(a, bw, gridsize, cut, clip)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2534, "name": "fit", "kind": "ref", "category": "function", "info": " params = fit.fit(a)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2535, "name": "pdf", "kind": "ref", "category": "function", "info": " y = pdf(x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2544, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(axlabel)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/distributions.py", "rel_fname": "seaborn/distributions.py", "line": 2546, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(axlabel)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 72, "name": "user_cache_dir", "kind": "def", "category": "function", "info": "def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):\n r\"\"\"Return full path to the user-specific cache dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"opinion\" (boolean) can be False to disable the appending of\n \"Cache\" to the base app data dir for Windows. See\n discussion below.\n\n Typical user cache directories are:\n Mac OS X: ~/Library/Caches/\n Unix: ~/.cache/ (XDG default)\n Win XP: C:\\Documents and Settings\\\\Local Settings\\Application Data\\\\\\Cache\n Vista: C:\\Users\\\\AppData\\Local\\\\\\Cache\n\n On Windows the only suggestion in the MSDN docs is that local settings go in\n the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming\n app data dir (the default returned by `user_data_dir` above). Apps typically\n put cache data somewhere *under* the given dir here. 
Some examples:\n ...\\Mozilla\\Firefox\\Profiles\\\\Cache\n ...\\Acme\\SuperApp\\Cache\\1.0\n OPINION: This function appends \"Cache\" to the `CSIDL_LOCAL_APPDATA` value.\n This can be disabled with the `opinion=False` option.\n \"\"\"\n if system == \"win32\":\n if appauthor is None:\n appauthor = appname\n path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n if appname:\n if appauthor is not False:\n path = os.path.join(path, appauthor, appname)\n else:\n path = os.path.join(path, appname)\n if opinion:\n path = os.path.join(path, \"Cache\")\n elif system == 'darwin':\n path = os.path.expanduser('~/Library/Caches')\n if appname:\n path = os.path.join(path, appname)\n else:\n path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 108, "name": "normpath", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 108, "name": "_get_win_folder", "kind": "ref", "category": "function", "info": " path = os.path.normpath(_get_win_folder(\"CSIDL_LOCAL_APPDATA\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 117, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.path.expanduser('~/Library/Caches')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 121, "name": "expanduser", "kind": "ref", "category": "function", "info": " path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 131, "name": "_get_win_folder_from_registry", "kind": "def", "category": "function", "info": "def _get_win_folder_from_registry(csidl_name):\n \"\"\"This is a fallback technique at best. 
I'm not sure if using the\n registry for this guarantees us the correct answer for all CSIDL_*\n names.\n \"\"\"\n import winreg as _winreg\n\n shell_folder_name = {\n \"CSIDL_APPDATA\": \"AppData\",\n \"CSIDL_COMMON_APPDATA\": \"Common AppData\",\n \"CSIDL_LOCAL_APPDATA\": \"Local AppData\",\n }[csidl_name]\n\n key = _winreg.OpenKey(\n _winreg.HKEY_CURRENT_USER,\n r\"Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\Shell Folders\"\n )\n dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n return dir\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 144, "name": "OpenKey", "kind": "ref", "category": "function", "info": " key = _winreg.OpenKey(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 148, "name": "QueryValueEx", "kind": "ref", "category": "function", "info": " dir, type = _winreg.QueryValueEx(key, shell_folder_name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 152, "name": "_get_win_folder_with_pywin32", "kind": "def", "category": "function", "info": "def _get_win_folder_with_pywin32(csidl_name):\n from win32com.shell import shellcon, shell\n dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n # Try to make this a unicode path because SHGetFolderPath does\n # not return unicode strings when there is unicode data in the\n # path.\n try:\n dir = unicode(dir)\n\n # Downgrade to short path name if have highbit chars. See\n # .\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n try:\n import win32api\n dir = win32api.GetShortPathName(dir)\n except ImportError:\n pass\n except UnicodeError:\n pass\n return dir\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 154, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 159, "name": "unicode", "kind": "ref", "category": "function", "info": " dir = unicode(dir)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 171, "name": "GetShortPathName", "kind": "ref", "category": "function", "info": " dir = win32api.GetShortPathName(dir)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 179, "name": "_get_win_folder_with_ctypes", "kind": "def", "category": "function", "info": "def _get_win_folder_with_ctypes(csidl_name):\n import ctypes\n\n csidl_const = {\n \"CSIDL_APPDATA\": 26,\n \"CSIDL_COMMON_APPDATA\": 35,\n \"CSIDL_LOCAL_APPDATA\": 28,\n }[csidl_name]\n\n buf = ctypes.create_unicode_buffer(1024)\n ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n\n # Downgrade to short path name if have highbit chars. 
See\n # .\n has_high_char = False\n for c in buf:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf2 = ctypes.create_unicode_buffer(1024)\n if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n buf = buf2\n\n return buf.value\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 189, "name": "SHGetFolderPathW", "kind": "ref", "category": "function", "info": " ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 200, "name": "GetShortPathNameW", "kind": "ref", "category": "function", "info": " if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 205, "name": "_get_win_folder_with_jna", "kind": "def", "category": "function", "info": "def _get_win_folder_with_jna(csidl_name):\n import array\n from com.sun import jna\n from com.sun.jna.platform import win32\n\n buf_size = win32.WinDef.MAX_PATH * 2\n buf = array.zeros('c', buf_size)\n shell = win32.Shell32.INSTANCE\n shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n # Downgrade to short path name if have highbit chars. See\n # .\n has_high_char = False\n for c in dir:\n if ord(c) > 255:\n has_high_char = True\n break\n if has_high_char:\n buf = array.zeros('c', buf_size)\n kernel = win32.Kernel32.INSTANCE\n if kernel.GetShortPathName(dir, buf, buf_size):\n dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n\n return dir\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 211, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 213, "name": "SHGetFolderPath", "kind": "ref", "category": "function", "info": " shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 214, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 214, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 224, "name": "zeros", "kind": "ref", "category": "function", "info": " buf = array.zeros('c', buf_size)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 226, "name": "GetShortPathName", "kind": "ref", "category": 
"function", "info": " if kernel.GetShortPathName(dir, buf, buf_size):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 227, "name": "toString", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/appdirs.py", "rel_fname": "seaborn/external/appdirs.py", "line": 227, "name": "tostring", "kind": "ref", "category": "function", "info": " dir = jna.Native.toString(buf.tostring()).rstrip(\"\\0\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 39, "name": "strip_blank_lines", "kind": "def", "category": "function", "info": "def strip_blank_lines(l):\n \"Remove leading and trailing blank lines from a list of lines\"\n while l and not l[0].strip():\n del l[0]\n while l and not l[-1].strip():\n del l[-1]\n return l\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 48, "name": "Reader", "kind": "def", "category": "class", "info": "__init__\t__getitem__\treset\tread\tseek_next_non_empty_line\teof\tread_to_condition\tread_to_next_empty_line\tread_to_next_unindented_line\tpeek\tis_empty"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 65, "name": "reset", "kind": "ref", "category": "function", "info": " self.reset()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 70, "name": "reset", "kind": "def", "category": "function", "info": " def reset(self):\n self._l = 0 # current line nr\n\n def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 73, "name": "read", "kind": "def", "category": "function", "info": " def read(self):\n if not self.eof():\n out = self[self._l]\n self._l += 1\n return out\n else:\n return ''\n\n def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n 
start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 74, "name": "eof", "kind": "ref", "category": "function", "info": " if not self.eof():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 81, "name": "seek_next_non_empty_line", "kind": "def", "category": "function", "info": " def seek_next_non_empty_line(self):\n for l in self[self._l:]:\n if l.strip():\n break\n else:\n self._l += 1\n\n def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 88, "name": "eof", "kind": "def", "category": "function", "info": " def eof(self):\n return self._l >= len(self._str)\n\n def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 91, "name": "read_to_condition", "kind": "def", "category": "function", "info": " def read_to_condition(self, condition_func):\n start = self._l\n for line in self[start:]:\n if condition_func(line):\n return self[start:self._l]\n self._l += 1\n if self.eof():\n return 
self[start:self._l+1]\n return []\n\n def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 94, "name": "condition_func", "kind": "ref", "category": "function", "info": " if condition_func(line):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 97, "name": "eof", "kind": "ref", "category": "function", "info": " if self.eof():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 101, "name": "read_to_next_empty_line", "kind": "def", "category": "function", "info": " def read_to_next_empty_line(self):\n self.seek_next_non_empty_line()\n\n def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 102, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self.seek_next_non_empty_line()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 104, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(line):\n return not line.strip()\n\n return self.read_to_condition(is_empty)\n\n def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 107, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_empty)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 109, "name": "read_to_next_unindented_line", "kind": "def", "category": "function", "info": " def read_to_next_unindented_line(self):\n def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n 
return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 110, "name": "is_unindented", "kind": "def", "category": "function", "info": " def is_unindented(line):\n return (line.strip() and (len(line.lstrip()) == len(line)))\n return self.read_to_condition(is_unindented)\n\n def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 112, "name": "read_to_condition", "kind": "ref", "category": "function", "info": " return self.read_to_condition(is_unindented)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 114, "name": "peek", "kind": "def", "category": "function", "info": " def peek(self, n=0):\n if self._l + n < len(self._str):\n return self[self._l + n]\n else:\n return ''\n\n def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 120, "name": "is_empty", "kind": "def", "category": "function", "info": " def is_empty(self):\n return not ''.join(self._str).strip()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 124, "name": "ParseError", "kind": "def", "category": "class", "info": "__str__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 135, "name": "NumpyDocString", "kind": "def", "category": "class", "info": "__init__\t__getitem__\t__setitem__\t__iter__\t__len__\t_is_at_section\t_strip\t_read_to_next_section\t_read_sections\t_parse_param_list\t_parse_see_also\t_parse_index\t_parse_summary\t_parse\t_error_location\t_str_header\t_str_indent\t_str_signature\t_str_summary\t_str_extended_summary\t_str_param_list\t_str_section\t_str_see_also\t_str_index\t__str__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 167, "name": "Reader", "kind": "ref", "category": "function", "info": " self._doc = Reader(docstring)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 171, "name": "_parse", "kind": "ref", "category": "function", "info": " self._parse()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 181, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"Unknown section {key}\", error=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 191, "name": "_is_at_section", "kind": "def", "category": "function", "info": " def _is_at_section(self):\n 
self._doc.seek_next_non_empty_line()\n\n if self._doc.eof():\n return False\n\n l1 = self._doc.peek().strip() # e.g. Parameters\n\n if l1.startswith('.. index::'):\n return True\n\n l2 = self._doc.peek(1).strip() # ---------- or ==========\n return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))\n\n def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if 
line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 192, "name": "seek_next_non_empty_line", "kind": "ref", "category": "function", "info": " self._doc.seek_next_non_empty_line()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 194, "name": "eof", "kind": "ref", "category": "function", "info": " if self._doc.eof():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 197, "name": "peek", "kind": "ref", "category": "function", "info": " l1 = self._doc.peek().strip() # e.g. Parameters\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 202, "name": "peek", "kind": "ref", "category": "function", "info": " l2 = self._doc.peek(1).strip() # ---------- or ==========\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 205, "name": "_strip", "kind": "def", "category": "function", "info": " def _strip(self, doc):\n i = 0\n j = 0\n for i, line in enumerate(doc):\n if line.strip():\n break\n\n for j, line in enumerate(doc[::-1]):\n if line.strip():\n break\n\n return doc[i:len(doc)-j]\n\n def _read_to_next_section(self):\n section = self._doc.read_to_next_empty_line()\n\n while not self._is_at_section() and not self._doc.eof():\n if not self._doc.peek(-1).strip(): # previous line was empty\n section += ['']\n\n section += self._doc.read_to_next_empty_line()\n\n return section\n\n def _read_sections(self):\n while not self._doc.eof():\n data = self._read_to_next_section()\n name = data[0].strip()\n\n if name.startswith('..'): # index section\n yield name, data[1:]\n elif len(data) < 2:\n yield StopIteration\n else:\n yield name, self._strip(data[2:])\n\n def _parse_param_list(self, content, single_element_is_type=False):\n r = Reader(content)\n params = []\n while not r.eof():\n header = r.read().strip()\n if ' : ' in header:\n arg_name, arg_type = header.split(' : ')[:2]\n else:\n if single_element_is_type:\n arg_name, arg_type = '', header\n else:\n arg_name, arg_type = header, ''\n\n desc = r.read_to_next_unindented_line()\n desc = dedent_lines(desc)\n desc = strip_blank_lines(desc)\n\n params.append(Parameter(arg_name, arg_type, desc))\n\n return params\n\n # See also supports the following formats.\n #\n # <FUNCNAME>\n # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ 
(COMMA | PERIOD)? SPACE*\n # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n # <FUNCNAME> is one of\n # <PLAIN_FUNCNAME>\n # COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n # where\n # <PLAIN_FUNCNAME> is a legal function name, and\n # <ROLE> is any nonempty sequence of word characters.\n # Examples: func_f1 :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n # <DESC> is a string describing the function.\n\n _role = r\":(?P<role>\\w+):\"\n _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n _funcnamenext = _funcname.replace('role', 'rolenext')\n _funcnamenext = _funcnamenext.replace('name', 'namenext')\n _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n _line_rgx = re.compile(\n r\"^\\s*\" +\n r\"(?P<allfuncs>\" + # group for all function names\n _funcname +\n r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n r\")\" + # end of \"allfuncs\"\n r\"(?P<trailing>[,\\.])?\" + # Some function lists have a trailing comma (or period) '\\s*'\n _description)\n\n # Empty elements are replaced with '..'\n empty_description = '..'\n\n def _parse_see_also(self, content):\n \"\"\"\n func_name : Descriptive text\n continued text\n another_func_name : Descriptive text\n func_name1, func_name2, :meth:`func_name`, func_name3\n\n \"\"\"\n\n items = []\n\n def parse_item_name(text):\n \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n m = self._func_rgx.match(text)\n if not m:\n raise ParseError(f\"{text} is not a item name\")\n role = m.group('role')\n name = m.group('name') if role else m.group('name2')\n return name, role, m.end()\n\n rest = []\n for line in content:\n if not line.strip():\n continue\n\n line_match = self._line_rgx.match(line)\n description = None\n if line_match:\n description = line_match.group('desc')\n if line_match.group('trailing') and description:\n self._error_location(\n 'Unexpected comma or period after function list at index %d of '\n 'line \"%s\"' % (line_match.end('trailing'), line),\n error=False)\n if not description and line.startswith(' '):\n rest.append(line.strip())\n elif line_match:\n funcs = []\n text = line_match.group('allfuncs')\n while True:\n if not text.strip():\n break\n name, role, match_end = parse_item_name(text)\n funcs.append((name, role))\n text = text[match_end:].strip()\n if text and text[0] == ',':\n text = text[1:].strip()\n rest = list(filter(None, [description]))\n items.append((funcs, rest))\n else:\n raise ParseError(f\"{line} is not a item name\")\n return items\n\n def _parse_index(self, section, content):\n \"\"\"\n .. 
index: default\n           :refguide: something, else, and more\n\n        \"\"\"\n        def strip_each_in(lst):\n            return [s.strip() for s in lst]\n\n        out = {}\n        section = section.split('::')\n        if len(section) > 1:\n            out['default'] = strip_each_in(section[1].split(','))[0]\n        for line in content:\n            line = line.split(':')\n            if len(line) > 2:\n                out[line[1]] = strip_each_in(line[2].split(','))\n        return out\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 218, "name": "_read_to_next_section", "kind": "def", "category": "function", "info": "    def _read_to_next_section(self):\n        section = self._doc.read_to_next_empty_line()\n\n        while not self._is_at_section() and not self._doc.eof():\n            if not self._doc.peek(-1).strip():  # previous line was empty\n                section += ['']\n\n            section += self._doc.read_to_next_empty_line()\n\n        return section\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 219, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": "        section = self._doc.read_to_next_empty_line()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 221, "name": "_is_at_section", "kind": "ref", "category": "function", "info": "        while not self._is_at_section() and not self._doc.eof():\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 221, "name": "eof", "kind": "ref", "category": "function", "info": "        while not self._is_at_section() and not self._doc.eof():\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 222, "name": "peek", "kind": "ref", "category": "function", "info": "            if not self._doc.peek(-1).strip():  # previous line was empty\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 225, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": "            section += self._doc.read_to_next_empty_line()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 229, "name": "_read_sections", "kind": "def", "category": "function", "info": "    def _read_sections(self):\n        while not self._doc.eof():\n            data = self._read_to_next_section()\n            name = data[0].strip()\n\n            if name.startswith('..'):  # index section\n                yield name, data[1:]\n            elif len(data) < 2:\n                yield StopIteration\n            else:\n                yield name, self._strip(data[2:])\n"},
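An aside on the reading loop in `_read_to_next_section` above: it accumulates paragraph chunks until the next section header or end of input. A toy standalone version over a plain list of lines, independent of docscrape's Reader class (the helper below is hypothetical, for illustration only):

    def read_to_next_empty_line(lines, i):
        # Collect lines until a blank one; return the chunk and the new cursor.
        chunk = []
        while i < len(lines) and lines[i].strip():
            chunk.append(lines[i])
            i += 1
        return chunk, i

    lines = ['Parameters', '----------', 'x : int', '', 'Returns']
    chunk, i = read_to_next_empty_line(lines, 0)
    assert chunk == ['Parameters', '----------', 'x : int'] and i == 3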
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 230, "name": "eof", "kind": "ref", "category": "function", "info": "        while not self._doc.eof():\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 231, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": "            data = self._read_to_next_section()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 239, "name": "_strip", "kind": "ref", "category": "function", "info": "                yield name, self._strip(data[2:])\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 241, "name": "_parse_param_list", "kind": "def", "category": "function", "info": "    def _parse_param_list(self, content, single_element_is_type=False):\n        r = Reader(content)\n        params = []\n        while not r.eof():\n            header = r.read().strip()\n            if ' : ' in header:\n                arg_name, arg_type = header.split(' : ')[:2]\n            else:\n                if single_element_is_type:\n                    arg_name, arg_type = '', header\n                else:\n                    arg_name, arg_type = header, ''\n\n            desc = r.read_to_next_unindented_line()\n            desc = dedent_lines(desc)\n            desc = strip_blank_lines(desc)\n\n            params.append(Parameter(arg_name, arg_type, desc))\n\n        return params\n\n    # See also supports the following formats.\n    #\n    # <FUNCNAME>\n    # <FUNCNAME> SPACE* COLON SPACE+ <DESC> SPACE*\n    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)+ (COMMA | PERIOD)? SPACE*\n    # <FUNCNAME> ( COMMA SPACE+ <FUNCNAME>)* SPACE* COLON SPACE+ <DESC> SPACE*\n\n    # <FUNCNAME> is one of\n    #   <PLAIN_FUNCNAME>\n    #   COLON <ROLE> COLON BACKTICK <PLAIN_FUNCNAME> BACKTICK\n    # where\n    #   <PLAIN_FUNCNAME> is a legal function name, and\n    #   <ROLE> is any nonempty sequence of word characters.\n    # Examples: func_f1  :meth:`func_h1` :obj:`~baz.obj_r` :class:`class_j`\n    # <DESC> is a string describing the function.\n\n    _role = r\":(?P<role>\\w+):\"\n    _funcbacktick = r\"`(?P<name>(?:~\\w+\\.)?[a-zA-Z0-9_\\.-]+)`\"\n    _funcplain = r\"(?P<name2>[a-zA-Z0-9_\\.-]+)\"\n    _funcname = r\"(\" + _role + _funcbacktick + r\"|\" + _funcplain + r\")\"\n    _funcnamenext = _funcname.replace('role', 'rolenext')\n    _funcnamenext = _funcnamenext.replace('name', 'namenext')\n    _description = r\"(?P<description>\\s*:(\\s+(?P<desc>\\S+.*))?)?\\s*$\"\n    _func_rgx = re.compile(r\"^\\s*\" + _funcname + r\"\\s*\")\n    _line_rgx = re.compile(\n        r\"^\\s*\" +\n        r\"(?P<allfuncs>\" +  # group for all function names\n        _funcname +\n        r\"(?P<morefuncs>([,]\\s+\" + _funcnamenext + r\")*)\" +\n        r\")\" +  # end of \"allfuncs\"\n        r\"(?P<trailing>[,\\.])?\" +  # Some function lists have a trailing comma (or period) '\\s*'\n        _description)\n\n    # Empty <DESC> elements are replaced with '..'\n    empty_description = '..'\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 242, "name": "Reader", "kind": "ref", "category": "function", "info": "        r = Reader(content)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 244, "name": "eof", "kind": "ref", "category": "function", "info": "        while not r.eof():\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 245, "name": "read", "kind": "ref", "category": "function", "info": "            header = r.read().strip()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 254, "name": "read_to_next_unindented_line", "kind": "ref", "category": "function", "info": "            desc = r.read_to_next_unindented_line()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 255, "name": "dedent_lines", "kind": "ref", "category": "function", "info": "            desc = dedent_lines(desc)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 256, "name": "strip_blank_lines", "kind": "ref", "category": "function", "info": "            desc = strip_blank_lines(desc)\n"},
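The header rule in `_parse_param_list` above splits each entry's first line on ' : ' to separate name from type; a bare entry counts as a type in Returns/Yields/Raises-style sections and as a name elsewhere. A minimal standalone sketch of that rule (the `split_header` helper is hypothetical, not part of docscrape.py):

    def split_header(header, single_element_is_type=False):
        # 'name : type' entries split on the first ' : ' separator
        if ' : ' in header:
            return tuple(header.split(' : ')[:2])
        # bare entries: a type for Returns-like sections, a name otherwise
        if single_element_is_type:
            return '', header
        return header, ''

    assert split_header('x : int') == ('x', 'int')
    assert split_header('int', single_element_is_type=True) == ('', 'int')
    assert split_header('verbose') == ('verbose', '')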
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 298, "name": "_parse_see_also", "kind": "def", "category": "function", "info": "    def _parse_see_also(self, content):\n        \"\"\"\n        func_name : Descriptive text\n            continued text\n        another_func_name : Descriptive text\n        func_name1, func_name2, :meth:`func_name`, func_name3\n\n        \"\"\"\n\n        items = []\n\n        def parse_item_name(text):\n            \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n            m = self._func_rgx.match(text)\n            if not m:\n                raise ParseError(f\"{text} is not a item name\")\n            role = m.group('role')\n            name = m.group('name') if role else m.group('name2')\n            return name, role, m.end()\n\n        rest = []\n        for line in content:\n            if not line.strip():\n                continue\n\n            line_match = self._line_rgx.match(line)\n            description = None\n            if line_match:\n                description = line_match.group('desc')\n                if line_match.group('trailing') and description:\n                    self._error_location(\n                        'Unexpected comma or period after function list at index %d of '\n                        'line \"%s\"' % (line_match.end('trailing'), line),\n                        error=False)\n            if not description and line.startswith(' '):\n                rest.append(line.strip())\n            elif line_match:\n                funcs = []\n                text = line_match.group('allfuncs')\n                while True:\n                    if not text.strip():\n                        break\n                    name, role, match_end = parse_item_name(text)\n                    funcs.append((name, role))\n                    text = text[match_end:].strip()\n                    if text and text[0] == ',':\n                        text = text[1:].strip()\n                rest = list(filter(None, [description]))\n                items.append((funcs, rest))\n            else:\n                raise ParseError(f\"{line} is not a item name\")\n        return items\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 309, "name": "parse_item_name", "kind": "def", "category": "function", "info": "        def parse_item_name(text):\n            \"\"\"Match ':role:`name`' or 'name'.\"\"\"\n            m = self._func_rgx.match(text)\n            if not m:\n                raise ParseError(f\"{text} is not a item name\")\n            role = m.group('role')\n            name = m.group('name') if role else m.group('name2')\n            return name, role, m.end()\n"},
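The named groups in the patterns above ('role', 'name', 'name2') are what `parse_item_name` reads back with `m.group(...)`. A quick self-contained check of the item pattern built from the same pieces as docscrape's `_func_rgx`:

    import re

    _role = r":(?P<role>\w+):"
    _funcbacktick = r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_\.-]+)`"
    _funcplain = r"(?P<name2>[a-zA-Z0-9_\.-]+)"
    _func_rgx = re.compile(r"^\s*(" + _role + _funcbacktick + r"|" + _funcplain + r")\s*")

    m = _func_rgx.match(":meth:`func_h1`")
    assert (m.group('role'), m.group('name')) == ('meth', 'func_h1')
    m = _func_rgx.match("func_f1, func_f2")
    assert m.group('name2') == 'func_f1'  # plain names land in 'name2'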
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 340, "name": "parse_item_name", "kind": "ref", "category": "function", "info": " name, role, match_end = parse_item_name(text)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 348, "name": "ParseError", "kind": "ref", "category": "function", "info": " raise ParseError(f\"{line} is not a item name\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 351, "name": "_parse_index", "kind": "def", "category": "function", "info": " def _parse_index(self, section, content):\n \"\"\"\n .. index: default\n :refguide: something, else, and more\n\n \"\"\"\n def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 357, "name": "strip_each_in", "kind": "def", "category": "function", "info": " def strip_each_in(lst):\n return [s.strip() for s in lst]\n\n out = {}\n section = section.split('::')\n if len(section) > 1:\n out['default'] = strip_each_in(section[1].split(','))[0]\n for line in content:\n line = line.split(':')\n if len(line) > 2:\n out[line[1]] = strip_each_in(line[2].split(','))\n return out\n\n def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
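For reference, the string splitting that `_parse_index` above performs on an index directive, traced by hand (a sketch of the same steps, not an actual docscrape call):

    # '.. index:: default' contributes the 'default' entry
    parts = '.. index:: default'.split('::')          # ['.. index', ' default']
    default = [s.strip() for s in parts[1].split(',')][0]
    assert default == 'default'

    # ':refguide: something, else' becomes a keyed reference list
    fields = '   :refguide: something, else'.split(':')
    key, refs = fields[1], [s.strip() for s in fields[2].split(',')]
    assert (key, refs) == ('refguide', ['something', 'else'])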
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 363, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out['default'] = strip_each_in(section[1].split(','))[0]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 367, "name": "strip_each_in", "kind": "ref", "category": "function", "info": " out[line[1]] = strip_each_in(line[2].split(','))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 370, "name": "_parse_summary", "kind": "def", "category": "function", "info": " def _parse_summary(self):\n \"\"\"Grab signature (if given) and summary\"\"\"\n if self._is_at_section():\n return\n\n # If several signatures present, take the last one\n while True:\n summary = self._doc.read_to_next_empty_line()\n summary_str = \" \".join([s.strip() for s in summary]).strip()\n compiled = re.compile(r'^([\\w., ]+=)?\\s*[\\w\\.]+\\(.*\\)$')\n if compiled.match(summary_str):\n self['Signature'] = summary_str\n if not self._is_at_section():\n continue\n break\n\n if summary is not None:\n self['Summary'] = summary\n\n if not self._is_at_section():\n self['Extended Summary'] = self._read_to_next_section()\n\n def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. 
index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 372, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if self._is_at_section():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 377, "name": "read_to_next_empty_line", "kind": "ref", "category": "function", "info": " summary = self._doc.read_to_next_empty_line()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 382, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 389, "name": "_is_at_section", "kind": "ref", "category": "function", "info": " if not self._is_at_section():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 390, "name": "_read_to_next_section", "kind": "ref", "category": "function", "info": " self['Extended Summary'] = self._read_to_next_section()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 392, "name": "_parse", "kind": "def", "category": "function", "info": " def _parse(self):\n self._doc.reset()\n self._parse_summary()\n\n sections = list(self._read_sections())\n section_names = {section for section, content in sections}\n\n has_returns = 'Returns' in section_names\n has_yields = 'Yields' in section_names\n # We could do more tests, but we are not. 
Arbitrarily.\n if has_returns and has_yields:\n msg = 'Docstring contains both a Returns and Yields section.'\n raise ValueError(msg)\n if not has_yields and 'Receives' in section_names:\n msg = 'Docstring contains a Receives section but not Yields.'\n raise ValueError(msg)\n\n for (section, content) in sections:\n if not section.startswith('..'):\n section = (s.capitalize() for s in section.split(' '))\n section = ' '.join(section)\n if self.get(section):\n self._error_location(f\"The section {section} appears twice\")\n\n if section in ('Parameters', 'Other Parameters', 'Attributes',\n 'Methods'):\n self[section] = self._parse_param_list(content)\n elif section in ('Returns', 'Yields', 'Raises', 'Warns', 'Receives'):\n self[section] = self._parse_param_list(\n content, single_element_is_type=True)\n elif section.startswith('.. index::'):\n self['index'] = self._parse_index(section, content)\n elif section == 'See Also':\n self['See Also'] = self._parse_see_also(content)\n else:\n self[section] = content\n\n def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 393, "name": "reset", "kind": "ref", "category": "function", "info": " self._doc.reset()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 394, "name": "_parse_summary", "kind": "ref", "category": "function", "info": " self._parse_summary()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 396, "name": "_read_sections", "kind": "ref", "category": "function", "info": " sections = list(self._read_sections())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 414, "name": "_error_location", "kind": "ref", "category": "function", "info": " self._error_location(f\"The section {section} appears twice\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 418, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(content)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 420, "name": "_parse_param_list", "kind": "ref", "category": "function", "info": " self[section] = self._parse_param_list(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 423, "name": "_parse_index", "kind": "ref", "category": "function", "info": " self['index'] = self._parse_index(section, content)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 425, "name": "_parse_see_also", "kind": "ref", "category": "function", "info": " self['See Also'] = self._parse_see_also(content)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 429, "name": "_error_location", "kind": "def", "category": "function", "info": " def _error_location(self, msg, error=True):\n if hasattr(self, '_obj'):\n # we know where the docs came from:\n try:\n filename = inspect.getsourcefile(self._obj)\n except TypeError:\n filename = None\n msg = msg + f\" in the docstring of {self._obj} 
in {filename}.\"\n if error:\n raise ValueError(msg)\n else:\n warn(msg)\n\n # string conversion routines\n\n def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 444, "name": "_str_header", "kind": "def", "category": "function", "info": " def _str_header(self, name, symbol='-'):\n return [name, len(name)*symbol]\n\n def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 447, "name": "_str_indent", "kind": "def", "category": "function", "info": " def _str_indent(self, doc, indent=4):\n out = []\n for line in doc:\n out += [' '*indent + line]\n return out\n\n def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 453, "name": "_str_signature", "kind": "def", "category": "function", "info": " def _str_signature(self):\n if self['Signature']:\n return [self['Signature'].replace('*', r'\\*')] + ['']\n else:\n return ['']\n\n def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 459, "name": "_str_summary", "kind": "def", "category": "function", "info": " def _str_summary(self):\n if self['Summary']:\n return self['Summary'] + ['']\n else:\n return []\n\n def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 465, "name": "_str_extended_summary", "kind": "def", "category": "function", "info": " def _str_extended_summary(self):\n if self['Extended Summary']:\n return self['Extended Summary'] + ['']\n else:\n return []\n\n def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 471, "name": "_str_param_list", "kind": "def", "category": "function", "info": " def _str_param_list(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n for param in self[name]:\n parts = []\n if param.name:\n parts.append(param.name)\n if param.type:\n parts.append(param.type)\n out += [' : '.join(parts)]\n if param.desc and ''.join(param.desc).strip():\n out += self._str_indent(param.desc)\n out += ['']\n return out\n\n def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 474, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 483, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent(param.desc)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 487, "name": "_str_section", "kind": "def", "category": "function", "info": " def _str_section(self, name):\n out = []\n if self[name]:\n out += self._str_header(name)\n out += self[name]\n out += ['']\n return out\n\n def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 490, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 495, "name": "_str_see_also", "kind": "def", "category": "function", "info": " def _str_see_also(self, func_role):\n if not self['See Also']:\n return []\n out = []\n out += self._str_header(\"See Also\")\n out += ['']\n last_had_desc = True\n for funcs, desc in self['See Also']:\n assert isinstance(funcs, list)\n links = []\n for func, role in funcs:\n if role:\n link = f':{role}:`{func}`'\n elif func_role:\n link = f':{func_role}:`{func}`'\n else:\n link = f\"`{func}`_\"\n links.append(link)\n link = ', '.join(links)\n out += [link]\n if desc:\n out += self._str_indent([' '.join(desc)])\n last_had_desc = True\n else:\n last_had_desc = False\n out += self._str_indent([self.empty_description])\n\n if last_had_desc:\n out += ['']\n out += ['']\n return out\n\n def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 499, "name": "_str_header", "kind": "ref", "category": "function", "info": " out += self._str_header(\"See Also\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 516, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([' '.join(desc)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 520, "name": "_str_indent", "kind": "ref", "category": "function", "info": " out += self._str_indent([self.empty_description])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 527, "name": "_str_index", "kind": "def", "category": "function", "info": " def _str_index(self):\n idx = self['index']\n out = []\n output_index = False\n default_index = idx.get('default', '')\n if default_index:\n output_index = True\n out += [f'.. 
index:: {default_index}']\n for section, references in idx.items():\n if section == 'default':\n continue\n output_index = True\n out += [f\" :{section}: {', '.join(references)}\"]\n if output_index:\n return out\n else:\n return ''\n\n def __str__(self, func_role=''):\n out = []\n out += self._str_signature()\n out += self._str_summary()\n out += self._str_extended_summary()\n for param_list in ('Parameters', 'Returns', 'Yields', 'Receives',\n 'Other Parameters', 'Raises', 'Warns'):\n out += self._str_param_list(param_list)\n out += self._str_section('Warnings')\n out += self._str_see_also(func_role)\n for s in ('Notes', 'References', 'Examples'):\n out += self._str_section(s)\n for param_list in ('Attributes', 'Methods'):\n out += self._str_param_list(param_list)\n out += self._str_index()\n return '\\n'.join(out)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 547, "name": "_str_signature", "kind": "ref", "category": "function", "info": " out += self._str_signature()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 548, "name": "_str_summary", "kind": "ref", "category": "function", "info": " out += self._str_summary()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 549, "name": "_str_extended_summary", "kind": "ref", "category": "function", "info": " out += self._str_extended_summary()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 552, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 553, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section('Warnings')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 554, "name": "_str_see_also", "kind": "ref", "category": "function", "info": " out += self._str_see_also(func_role)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 556, "name": "_str_section", "kind": "ref", "category": "function", "info": " out += self._str_section(s)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 558, "name": "_str_param_list", "kind": "ref", "category": "function", "info": " out += self._str_param_list(param_list)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 559, "name": "_str_index", "kind": "ref", "category": "function", "info": " out += self._str_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 571, "name": "dedent_lines", "kind": "def", "category": "function", "info": "def dedent_lines(lines):\n \"\"\"Deindent a list 
of lines maximally\"\"\"\n return textwrap.dedent(\"\\n\".join(lines)).split(\"\\n\")\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 576, "name": "header", "kind": "def", "category": "function", "info": "def header(text, style='-'):\n return text + '\\n' + style*len(text) + '\\n'\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 580, "name": "FunctionDoc", "kind": "def", "category": "class", "info": "__init__\tget_func\t__str__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 592, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 608, "name": "get_func", "kind": "def", "category": "function", "info": " def get_func(self):\n func_name = getattr(self._f, '__name__', self.__class__.__name__)\n if inspect.isclass(self._f):\n func = getattr(self._f, '__call__', self._f.__init__)\n else:\n func = self._f\n return func, func_name\n\n def __str__(self):\n out = ''\n\n func, func_name = self.get_func()\n\n roles = {'func': 'function',\n 'meth': 'method'}\n\n if self._role:\n if self._role not in roles:\n print(f\"Warning: invalid role {self._role}\")\n out += f\".. {roles.get(self._role, '')}:: {func_name}\\n \\n\\n\"\n\n out += super().__str__(func_role=self._role)\n return out\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 619, "name": "get_func", "kind": "ref", "category": "function", "info": " func, func_name = self.get_func()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 633, "name": "ClassDoc", "kind": "def", "category": "class", "info": "__init__\tmethods\tproperties\t_is_show_member"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 668, "name": "splitlines_x", "kind": "def", "category": "function", "info": " def splitlines_x(s):\n if not s:\n return []\n else:\n return s.splitlines()\n for field, items in [('Methods', self.methods),\n ('Attributes', self.properties)]:\n if not self[field]:\n doc_list = []\n for name in sorted(items):\n if (name in _exclude or\n (_members and name not in _members)):\n continue\n try:\n doc_item = pydoc.getdoc(getattr(self._cls, name))\n doc_list.append(\n Parameter(name, '', splitlines_x(doc_item)))\n except AttributeError:\n pass # method doesn't exist\n self[field] = doc_list\n\n @property\n def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n 
inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 684, "name": "splitlines_x", "kind": "ref", "category": "function", "info": " Parameter(name, '', splitlines_x(doc_item)))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 690, "name": "methods", "kind": "def", "category": "function", "info": " def methods(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if ((not name.startswith('_')\n or name in self.extra_public_methods)\n and isinstance(func, Callable)\n and self._is_show_member(name))]\n\n @property\n def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 697, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 700, "name": "properties", "kind": "def", "category": "function", "info": " def properties(self):\n if self._cls is None:\n return []\n return [name for name, func in inspect.getmembers(self._cls)\n if (not name.startswith('_') and\n (func is None or isinstance(func, property) or\n inspect.isdatadescriptor(func))\n and self._is_show_member(name))]\n\n def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 707, "name": "_is_show_member", "kind": "ref", "category": "function", "info": " and self._is_show_member(name))]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/docscrape.py", "rel_fname": "seaborn/external/docscrape.py", "line": 709, "name": "_is_show_member", "kind": "def", "category": "function", "info": " def _is_show_member(self, name):\n if self.show_inherited_members:\n return True # show all class members\n if name not in self._cls.__dict__:\n return False # class member is inherited, we do not show it\n return True\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 30, "name": "husl_to_rgb", "kind": "def", "category": "function", "info": "def husl_to_rgb(h, s, 
l):\n return lch_to_rgb(*husl_to_lch([h, s, l]))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "lch_to_rgb", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 31, "name": "husl_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*husl_to_lch([h, s, l]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 34, "name": "husl_to_hex", "kind": "def", "category": "function", "info": "def husl_to_hex(h, s, l):\n return rgb_to_hex(husl_to_rgb(h, s, l))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 35, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(husl_to_rgb(h, s, l))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 38, "name": "rgb_to_husl", "kind": "def", "category": "function", "info": "def rgb_to_husl(r, g, b):\n return lch_to_husl(rgb_to_lch(r, g, b))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "lch_to_husl", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 39, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_husl(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 42, "name": "hex_to_husl", "kind": "def", "category": "function", "info": "def hex_to_husl(hex):\n return rgb_to_husl(*hex_to_rgb(hex))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 43, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_husl(*hex_to_rgb(hex))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 46, "name": "huslp_to_rgb", "kind": "def", "category": "function", "info": "def huslp_to_rgb(h, s, l):\n return lch_to_rgb(*huslp_to_lch([h, s, l]))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "lch_to_rgb", "kind": "ref", "category": "function", 
"info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 47, "name": "huslp_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_rgb(*huslp_to_lch([h, s, l]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 50, "name": "huslp_to_hex", "kind": "def", "category": "function", "info": "def huslp_to_hex(h, s, l):\n return rgb_to_hex(huslp_to_rgb(h, s, l))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "rgb_to_hex", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 51, "name": "huslp_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_hex(huslp_to_rgb(h, s, l))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 54, "name": "rgb_to_huslp", "kind": "def", "category": "function", "info": "def rgb_to_huslp(r, g, b):\n return lch_to_huslp(rgb_to_lch(r, g, b))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "lch_to_huslp", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 55, "name": "rgb_to_lch", "kind": "ref", "category": "function", "info": " return lch_to_huslp(rgb_to_lch(r, g, b))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 58, "name": "hex_to_huslp", "kind": "def", "category": "function", "info": "def hex_to_huslp(hex):\n return rgb_to_huslp(*hex_to_rgb(hex))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "rgb_to_huslp", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 59, "name": "hex_to_rgb", "kind": "ref", "category": "function", "info": " return rgb_to_huslp(*hex_to_rgb(hex))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 62, "name": "lch_to_rgb", "kind": "def", "category": "function", "info": "def lch_to_rgb(l, c, h):\n return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "xyz_to_rgb", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "luv_to_xyz", 
"kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 63, "name": "lch_to_luv", "kind": "ref", "category": "function", "info": " return xyz_to_rgb(luv_to_xyz(lch_to_luv([l, c, h])))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 66, "name": "rgb_to_lch", "kind": "def", "category": "function", "info": "def rgb_to_lch(r, g, b):\n return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "luv_to_lch", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "xyz_to_luv", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 67, "name": "rgb_to_xyz", "kind": "ref", "category": "function", "info": " return luv_to_lch(xyz_to_luv(rgb_to_xyz([r, g, b])))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 70, "name": "max_chroma", "kind": "def", "category": "function", "info": "def max_chroma(L, H):\n hrad = math.radians(H)\n sinH = (math.sin(hrad))\n cosH = (math.cos(hrad))\n sub1 = (math.pow(L + 16, 3.0) / 1560896.0)\n sub2 = sub1 if sub1 > 0.008856 else (L / 903.3)\n result = float(\"inf\")\n for row in m:\n m1 = row[0]\n m2 = row[1]\n m3 = row[2]\n top = ((0.99915 * m1 + 1.05122 * m2 + 1.14460 * m3) * sub2)\n rbottom = (0.86330 * m3 - 0.17266 * m2)\n lbottom = (0.12949 * m3 - 0.38848 * m1)\n bottom = (rbottom * sinH + lbottom * cosH) * sub2\n\n for t in (0.0, 1.0):\n C = (L * (top - 1.05122 * t) / (bottom + 0.17266 * sinH * t))\n if C > 0.0 and C < result:\n result = C\n return result\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 93, "name": "_hrad_extremum", "kind": "def", "category": "function", "info": "def _hrad_extremum(L):\n lhs = (math.pow(L, 3.0) + 48.0 * math.pow(L, 2.0) + 768.0 * L + 4096.0) / 1560896.0\n rhs = 1107.0 / 125000.0\n sub = lhs if lhs > rhs else 10.0 * L / 9033.0\n chroma = float(\"inf\")\n result = None\n for row in m:\n for limit in (0.0, 1.0):\n [m1, m2, m3] = row\n top = -3015466475.0 * m3 * sub + 603093295.0 * m2 * sub - 603093295.0 * limit\n bottom = 1356959916.0 * m1 * sub - 452319972.0 * m3 * sub\n hrad = math.atan2(top, bottom)\n # This is a math hack to deal with tan quadrants, I'm too lazy to figure\n # out how to do this properly\n if limit == 0.0:\n hrad += math.pi\n test = max_chroma(L, math.degrees(hrad))\n if test < chroma:\n chroma = test\n result = hrad\n return result\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 109, "name": "max_chroma", "kind": "ref", "category": "function", "info": " test = max_chroma(L, 
math.degrees(hrad))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 116, "name": "max_chroma_pastel", "kind": "def", "category": "function", "info": "def max_chroma_pastel(L):\n H = math.degrees(_hrad_extremum(L))\n return max_chroma(L, H)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 117, "name": "_hrad_extremum", "kind": "ref", "category": "function", "info": " H = math.degrees(_hrad_extremum(L))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 118, "name": "max_chroma", "kind": "ref", "category": "function", "info": " return max_chroma(L, H)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 121, "name": "dot_product", "kind": "def", "category": "function", "info": "def dot_product(a, b):\n return sum(map(operator.mul, a, b))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 125, "name": "f", "kind": "def", "category": "function", "info": "def f(t):\n if t > lab_e:\n return (math.pow(t, 1.0 / 3.0))\n else:\n return (7.787 * t + 16.0 / 116.0)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 132, "name": "f_inv", "kind": "def", "category": "function", "info": "def f_inv(t):\n if math.pow(t, 3.0) > lab_e:\n return (math.pow(t, 3.0))\n else:\n return (116.0 * t - 16.0) / lab_k\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 139, "name": "from_linear", "kind": "def", "category": "function", "info": "def from_linear(c):\n if c <= 0.0031308:\n return 12.92 * c\n else:\n return (1.055 * math.pow(c, 1.0 / 2.4) - 0.055)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 146, "name": "to_linear", "kind": "def", "category": "function", "info": "def to_linear(c):\n a = 0.055\n\n if c > 0.04045:\n return (math.pow((c + a) / (1.0 + a), 2.4))\n else:\n return (c / 12.92)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 155, "name": "rgb_prepare", "kind": "def", "category": "function", "info": "def rgb_prepare(triple):\n ret = []\n for ch in triple:\n ch = round(ch, 3)\n\n if ch < -0.0001 or ch > 1.0001:\n raise Exception(f\"Illegal RGB value {ch:f}\")\n\n if ch < 0:\n ch = 0\n if ch > 1:\n ch = 1\n\n # Fix for Python 3 which by default rounds 4.5 down to 4.0\n # instead of Python 2 which is rounded to 5.0 which caused\n # a couple off by one errors in the tests. 
Tests now all pass\n # in Python 2 and Python 3\n ret.append(int(round(ch * 255 + 0.001, 0)))\n\n return ret\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 177, "name": "hex_to_rgb", "kind": "def", "category": "function", "info": "def hex_to_rgb(hex):\n if hex.startswith('#'):\n hex = hex[1:]\n r = int(hex[0:2], 16) / 255.0\n g = int(hex[2:4], 16) / 255.0\n b = int(hex[4:6], 16) / 255.0\n return [r, g, b]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 186, "name": "rgb_to_hex", "kind": "def", "category": "function", "info": "def rgb_to_hex(triple):\n [r, g, b] = triple\n return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 188, "name": "rgb_prepare", "kind": "ref", "category": "function", "info": " return '#%02x%02x%02x' % tuple(rgb_prepare([r, g, b]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 191, "name": "xyz_to_rgb", "kind": "def", "category": "function", "info": "def xyz_to_rgb(triple):\n xyz = map(lambda row: dot_product(row, triple), m)\n return list(map(from_linear, xyz))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 192, "name": "dot_product", "kind": "ref", "category": "function", "info": " xyz = map(lambda row: dot_product(row, triple), m)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 196, "name": "rgb_to_xyz", "kind": "def", "category": "function", "info": "def rgb_to_xyz(triple):\n rgbl = list(map(to_linear, triple))\n return list(map(lambda row: dot_product(row, rgbl), m_inv))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 198, "name": "dot_product", "kind": "ref", "category": "function", "info": " return list(map(lambda row: dot_product(row, rgbl), m_inv))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 201, "name": "xyz_to_luv", "kind": "def", "category": "function", "info": "def xyz_to_luv(triple):\n X, Y, Z = triple\n\n if X == Y == Z == 0.0:\n return [0.0, 0.0, 0.0]\n\n varU = (4.0 * X) / (X + (15.0 * Y) + (3.0 * Z))\n varV = (9.0 * Y) / (X + (15.0 * Y) + (3.0 * Z))\n L = 116.0 * f(Y / refY) - 16.0\n\n # Black will create a divide-by-zero error\n if L == 0.0:\n return [0.0, 0.0, 0.0]\n\n U = 13.0 * L * (varU - refU)\n V = 13.0 * L * (varV - refV)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 209, "name": "f", "kind": "ref", "category": "function", "info": " L = 116.0 * f(Y / refY) - 16.0\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 221, "name": "luv_to_xyz", "kind": "def", "category": "function", "info": "def luv_to_xyz(triple):\n L, U, V = triple\n\n if L == 0:\n return [0.0, 
0.0, 0.0]\n\n varY = f_inv((L + 16.0) / 116.0)\n varU = U / (13.0 * L) + refU\n varV = V / (13.0 * L) + refV\n Y = varY * refY\n X = 0.0 - (9.0 * Y * varU) / ((varU - 4.0) * varV - varU * varV)\n Z = (9.0 * Y - (15.0 * varV * Y) - (varV * X)) / (3.0 * varV)\n\n return [X, Y, Z]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 227, "name": "f_inv", "kind": "ref", "category": "function", "info": " varY = f_inv((L + 16.0) / 116.0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 237, "name": "luv_to_lch", "kind": "def", "category": "function", "info": "def luv_to_lch(triple):\n L, U, V = triple\n\n C = (math.pow(math.pow(U, 2) + math.pow(V, 2), (1.0 / 2.0)))\n hrad = (math.atan2(V, U))\n H = math.degrees(hrad)\n if H < 0.0:\n H = 360.0 + H\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 249, "name": "lch_to_luv", "kind": "def", "category": "function", "info": "def lch_to_luv(triple):\n L, C, H = triple\n\n Hrad = math.radians(H)\n U = (math.cos(Hrad) * C)\n V = (math.sin(Hrad) * C)\n\n return [L, U, V]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 259, "name": "husl_to_lch", "kind": "def", "category": "function", "info": "def husl_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma(L, H)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 267, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 273, "name": "lch_to_husl", "kind": "def", "category": "function", "info": "def lch_to_husl(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma(L, H)\n S = C / mx * 100.0\n\n return [H, S, L]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 281, "name": "max_chroma", "kind": "ref", "category": "function", "info": " mx = max_chroma(L, H)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 287, "name": "huslp_to_lch", "kind": "def", "category": "function", "info": "def huslp_to_lch(triple):\n H, S, L = triple\n\n if L > 99.9999999:\n return [100, 0.0, H]\n if L < 0.00000001:\n return [0.0, 0.0, H]\n\n mx = max_chroma_pastel(L)\n C = mx / 100.0 * S\n\n return [L, C, H]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 295, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 301, "name": 
"lch_to_huslp", "kind": "def", "category": "function", "info": "def lch_to_huslp(triple):\n L, C, H = triple\n\n if L > 99.9999999:\n return [H, 0.0, 100.0]\n if L < 0.00000001:\n return [H, 0.0, 0.0]\n\n mx = max_chroma_pastel(L)\n S = C / mx * 100.0\n\n return [H, S, L]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/husl.py", "rel_fname": "seaborn/external/husl.py", "line": 309, "name": "max_chroma_pastel", "kind": "ref", "category": "function", "info": " mx = max_chroma_pastel(L)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 80, "name": "gaussian_kde", "kind": "def", "category": "class", "info": "__init__\tevaluate\tscotts_factor\tsilverman_factor\tset_bandwidth\t_compute_covariance\tpdf\tweights\tneff"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 201, "name": "astype", "kind": "ref", "category": "function", "info": " self._weights = atleast_1d(weights).astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 209, "name": "set_bandwidth", "kind": "ref", "category": "function", "info": " self.set_bandwidth(bw_method=bw_method)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 211, "name": "evaluate", "kind": "def", "category": "function", "info": " def evaluate(self, points):\n \"\"\"Evaluate the estimated pdf on a set of points.\n\n Parameters\n ----------\n points : (# of dimensions, # of points)-array\n Alternatively, a (# of dimensions,) vector can be passed in and\n treated as a single point.\n\n Returns\n -------\n values : (# of points,)-array\n The values at each point.\n\n Raises\n ------\n ValueError : if the dimensionality of the input points is different than\n the dimensionality of the KDE.\n\n \"\"\"\n points = atleast_2d(asarray(points))\n\n d, m = points.shape\n if d != self.d:\n if d == 1 and m == self.d:\n # points was passed in as a row vector\n points = reshape(points, (self.d, 1))\n m = 1\n else:\n msg = f\"points have dimension {d}, dataset has dimension {self.d}\"\n raise ValueError(msg)\n\n output_dtype = np.common_type(self.covariance, points)\n result = zeros((m,), dtype=output_dtype)\n\n whitening = linalg.cholesky(self.inv_cov)\n scaled_dataset = dot(whitening, self.dataset)\n scaled_points = dot(whitening, points)\n\n if m >= self.n:\n # there are more points than data, so loop over data\n for i in range(self.n):\n diff = scaled_dataset[:, i, newaxis] - scaled_points\n energy = sum(diff * diff, axis=0) / 2.0\n result += self.weights[i]*exp(-energy)\n else:\n # loop over points\n for i in range(m):\n diff = scaled_dataset - scaled_points[:, i, newaxis]\n energy = sum(diff * diff, axis=0) / 2.0\n result[i] = sum(exp(-energy)*self.weights, axis=0)\n\n result = result / self._norm_factor\n\n return result\n\n __call__ = evaluate\n\n def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be 
overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 269, "name": "scotts_factor", "kind": "def", "category": "function", "info": " def scotts_factor(self):\n \"\"\"Compute Scott's factor.\n\n Returns\n -------\n s : float\n Scott's factor.\n \"\"\"\n return power(self.neff, -1./(self.d+4))\n\n def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 279, "name": "silverman_factor", "kind": "def", "category": "function", "info": " def silverman_factor(self):\n \"\"\"Compute the Silverman factor.\n\n Returns\n -------\n s : float\n The silverman factor.\n \"\"\"\n return power(self.neff*(self.d+2.0)/4.0, -1./(self.d+4))\n\n # Default method to calculate bandwidth, can be overwritten by subclass\n covariance_factor = scotts_factor\n covariance_factor.__doc__ = \"\"\"Computes the coefficient (`kde.factor`) that\n multiplies the data covariance matrix to obtain the kernel covariance\n matrix. The default is `scotts_factor`. A subclass can overwrite this\n method to provide a different method, or set it through a call to\n `kde.set_bandwidth`.\"\"\"\n\n def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 297, "name": "set_bandwidth", "kind": "def", "category": "function", "info": " def set_bandwidth(self, bw_method=None):\n \"\"\"Compute the estimator bandwidth with given method.\n\n The new bandwidth calculated after a call to `set_bandwidth` is used\n for subsequent evaluations of the estimated density.\n\n Parameters\n ----------\n bw_method : str, scalar or callable, optional\n The method used to calculate the estimator bandwidth. This can be\n 'scott', 'silverman', a scalar constant or a callable. If a\n scalar, this will be used directly as `kde.factor`. If a callable,\n it should take a `gaussian_kde` instance as only parameter and\n return a scalar. If None (default), nothing happens; the current\n `kde.covariance_factor` method is kept.\n\n Notes\n -----\n .. versionadded:: 0.11\n\n \"\"\"\n if bw_method is None:\n pass\n elif bw_method == 'scott':\n self.covariance_factor = self.scotts_factor\n elif bw_method == 'silverman':\n self.covariance_factor = self.silverman_factor\n elif np.isscalar(bw_method) and not isinstance(bw_method, str):\n self._bw_method = 'use constant'\n self.covariance_factor = lambda: bw_method\n elif callable(bw_method):\n self._bw_method = bw_method\n self.covariance_factor = lambda: self._bw_method(self)\n else:\n msg = \"`bw_method` should be 'scott', 'silverman', a scalar \" \\\n \"or a callable.\"\n raise ValueError(msg)\n\n self._compute_covariance()\n\n def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 329, "name": "_bw_method", "kind": "ref", "category": "function", "info": " self.covariance_factor = lambda: self._bw_method(self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 335, "name": "_compute_covariance", "kind": "ref", "category": "function", "info": " self._compute_covariance()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 337, "name": "_compute_covariance", "kind": "def", "category": "function", "info": " def _compute_covariance(self):\n \"\"\"Computes the covariance matrix for each Gaussian kernel using\n covariance_factor().\n \"\"\"\n self.factor = self.covariance_factor()\n # Cache covariance and inverse covariance of the data\n if not hasattr(self, '_data_inv_cov'):\n self._data_covariance = atleast_2d(cov(self.dataset, rowvar=1,\n bias=False,\n aweights=self.weights))\n self._data_inv_cov = linalg.inv(self._data_covariance)\n\n self.covariance = self._data_covariance * self.factor**2\n self.inv_cov = self._data_inv_cov / self.factor**2\n self._norm_factor = sqrt(linalg.det(2*pi*self.covariance))\n\n def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 341, "name": "covariance_factor", "kind": "ref", "category": "function", "info": " self.factor = self.covariance_factor()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 353, "name": "pdf", "kind": "def", "category": "function", "info": " def pdf(self, x):\n \"\"\"\n Evaluate the estimated pdf on a provided set of points.\n\n Notes\n -----\n This is an alias for `gaussian_kde.evaluate`. 
See the ``evaluate``\n docstring for more details.\n\n \"\"\"\n return self.evaluate(x)\n\n @property\n def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 363, "name": "evaluate", "kind": "ref", "category": "function", "info": " return self.evaluate(x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 366, "name": "weights", "kind": "def", "category": "function", "info": " def weights(self):\n try:\n return self._weights\n except AttributeError:\n self._weights = ones(self.n)/self.n\n return self._weights\n\n @property\n def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/kde.py", "rel_fname": "seaborn/external/kde.py", "line": 374, "name": "neff", "kind": "def", "category": "function", "info": " def neff(self):\n try:\n return self._neff\n except AttributeError:\n self._neff = 1/sum(self.weights**2)\n return self._neff\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 33, "name": "InfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 58, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> \"NegativeInfinityType\":\n return NegativeInfinity\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 62, "name": "InfinityType", "kind": "ref", "category": "function", "info": "Infinity = InfinityType()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 65, "name": "NegativeInfinityType", "kind": "def", "category": "class", "info": "__repr__\t__hash__\t__lt__\t__le__\t__eq__\t__ne__\t__gt__\t__ge__\t__neg__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 90, "name": "__neg__", "kind": "def", "category": "function", "info": " def __neg__(self: object) -> InfinityType:\n return Infinity\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 94, "name": "NegativeInfinityType", "kind": "ref", "category": "function", "info": "NegativeInfinity = NegativeInfinityType()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 127, "name": "InvalidVersion", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": 
"seaborn/external/version.py", "line": 133, "name": "_BaseVersion", "kind": "def", "category": "class", "info": "__hash__\t__lt__\t__le__\t__eq__\t__ge__\t__gt__\t__ne__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 213, "name": "Version", "kind": "def", "category": "class", "info": "__init__\t__repr__\t__str__\tepoch\trelease\tpre\tpost\tdev\tlocal\tpublic\tbase_version\tis_prerelease\tis_postrelease\tis_devrelease\tmajor\tminor\tmicro"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 222, "name": "InvalidVersion", "kind": "ref", "category": "function", "info": " raise InvalidVersion(f\"Invalid version: '{version}'\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 225, "name": "_Version", "kind": "ref", "category": "function", "info": " self._version = _Version(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 226, "name": "group", "kind": "ref", "category": "function", "info": " epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 227, "name": "group", "kind": "ref", "category": "function", "info": " release=tuple(int(i) for i in match.group(\"release\").split(\".\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 228, "name": "group", "kind": "ref", "category": "function", "info": " pre=_parse_letter_version(match.group(\"pre_l\"), match.group(\"pre_n\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 229, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " post=_parse_letter_version(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 230, "name": "group", "kind": "ref", "category": "function", "info": " match.group(\"post_l\"), match.group(\"post_n1\") or match.group(\"post_n2\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "_parse_letter_version", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 232, "name": "group", "kind": "ref", "category": "function", "info": " dev=_parse_letter_version(match.group(\"dev_l\"), match.group(\"dev_n\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "_parse_local_version", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 233, "name": "group", "kind": "ref", "category": "function", "info": " local=_parse_local_version(match.group(\"local\")),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 237, "name": "_cmpkey", "kind": "ref", "category": "function", "info": " self._key = _cmpkey(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 278, "name": "epoch", "kind": "def", "category": "function", "info": " def epoch(self) -> int:\n _epoch: int = self._version.epoch\n return _epoch\n\n @property\n def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] 
= self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 283, "name": "release", "kind": "def", "category": "function", "info": " def release(self) -> Tuple[int, ...]:\n _release: Tuple[int, ...] = self._version.release\n return _release\n\n @property\n def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 288, "name": "pre", "kind": "def", "category": "function", "info": " def pre(self) -> Optional[Tuple[str, int]]:\n _pre: Optional[Tuple[str, int]] = self._version.pre\n return _pre\n\n @property\n def post(self) -> Optional[int]:\n return self._version.post[1] if 
self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 293, "name": "post", "kind": "def", "category": "function", "info": " def post(self) -> Optional[int]:\n return self._version.post[1] if self._version.post else None\n\n @property\n def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 297, "name": "dev", "kind": "def", "category": "function", "info": " def dev(self) -> Optional[int]:\n return self._version.dev[1] if self._version.dev else None\n\n @property\n def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def 
is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 301, "name": "local", "kind": "def", "category": "function", "info": " def local(self) -> Optional[str]:\n if self._version.local:\n return \".\".join(str(x) for x in self._version.local)\n else:\n return None\n\n @property\n def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 308, "name": "public", "kind": "def", "category": "function", "info": " def public(self) -> str:\n return str(self).split(\"+\", 1)[0]\n\n @property\n def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 312, "name": "base_version", "kind": "def", "category": "function", "info": " def base_version(self) -> str:\n parts = []\n\n # Epoch\n if self.epoch != 0:\n parts.append(f\"{self.epoch}!\")\n\n # Release segment\n parts.append(\".\".join(str(x) for x in self.release))\n\n return \"\".join(parts)\n\n @property\n def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if 
len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 325, "name": "is_prerelease", "kind": "def", "category": "function", "info": " def is_prerelease(self) -> bool:\n return self.dev is not None or self.pre is not None\n\n @property\n def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 329, "name": "is_postrelease", "kind": "def", "category": "function", "info": " def is_postrelease(self) -> bool:\n return self.post is not None\n\n @property\n def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 333, "name": "is_devrelease", "kind": "def", "category": "function", "info": " def is_devrelease(self) -> bool:\n return self.dev is not None\n\n @property\n def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 337, "name": "major", "kind": "def", "category": "function", "info": " def major(self) -> int:\n return self.release[0] if len(self.release) >= 1 else 0\n\n @property\n def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 341, "name": "minor", "kind": "def", "category": "function", "info": " def minor(self) -> int:\n return self.release[1] if len(self.release) >= 2 else 0\n\n @property\n def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 345, "name": "micro", "kind": "def", "category": "function", "info": " def micro(self) -> int:\n return self.release[2] if len(self.release) >= 3 else 0\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 349, "name": "_parse_letter_version", "kind": "def", "category": "function", "info": "def _parse_letter_version(\n letter: str, number: Union[str, bytes, SupportsInt]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 388, "name": "_parse_local_version", "kind": "def", "category": "function", "info": "def _parse_local_version(local: str) -> Optional[LocalType]:\n \"\"\"\n Takes a string like abc.1.twelve and turns it into (\"abc\", 1, \"twelve\").\n \"\"\"\n if local is not None:\n return tuple(\n part.lower() if not part.isdigit() else int(part)\n for part in _local_version_separators.split(local)\n )\n return None\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/external/version.py", "rel_fname": "seaborn/external/version.py", "line": 400, "name": "_cmpkey", "kind": "def", "category": "function", "info": "def _cmpkey(\n epoch: int,\n release: Tuple[int, ...],\n pre: Optional[Tuple[str, int]],\n post: Optional[Tuple[str, int]],\n dev: Optional[Tuple[str, int]],\n local: Optional[Tuple[SubLocalType]],\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 30, "name": "_index_to_label", "kind": "def", "category": "function", "info": "def _index_to_label(index):\n \"\"\"Convert a pandas index or multiindex to an axis label.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return \"-\".join(map(to_utf8, index.names))\n else:\n return index.name\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 38, "name": "_index_to_ticklabels", "kind": "def", "category": "function", "info": "def _index_to_ticklabels(index):\n \"\"\"Convert a pandas index or multiindex into ticklabels.\"\"\"\n if isinstance(index, pd.MultiIndex):\n return [\"-\".join(map(to_utf8, i)) for i in index.values]\n else:\n return index.values\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 46, "name": "_convert_colors", "kind": "def", "category": "function", "info": "def _convert_colors(colors):\n \"\"\"Convert either a list of colors or nested lists of colors to RGB.\"\"\"\n to_rgb = mpl.colors.to_rgb\n\n try:\n to_rgb(colors[0])\n # If this works, there is only one level of colors\n return list(map(to_rgb, colors))\n except ValueError:\n # If we get here, we have nested lists\n return [list(map(to_rgb, l)) for l in colors]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 51, "name": "to_rgb", "kind": "ref", "category": "function", "info": " to_rgb(colors[0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 59, "name": "_matrix_mask", "kind": "def", "category": "function", "info": "def _matrix_mask(data, mask):\n \"\"\"Ensure that data and mask are compatible and add missing values.\n\n Values will be plotted for cells where ``mask`` is ``False``.\n\n ``data`` is expected to be a DataFrame; ``mask`` can be an array or\n a DataFrame.\n\n \"\"\"\n if mask is None:\n mask = np.zeros(data.shape, bool)\n\n if isinstance(mask, np.ndarray):\n # 
For array masks, ensure that shape matches data then convert\n if mask.shape != data.shape:\n raise ValueError(\"Mask must have the same shape as data.\")\n\n mask = pd.DataFrame(mask,\n index=data.index,\n columns=data.columns,\n dtype=bool)\n\n elif isinstance(mask, pd.DataFrame):\n # For DataFrame masks, ensure that semantic labels match data\n if not mask.index.equals(data.index) \\\n and mask.columns.equals(data.columns):\n err = \"Mask must have the same index and columns as data.\"\n raise ValueError(err)\n\n # Add any cells with missing data to the mask\n # This works around an issue where `plt.pcolormesh` doesn't represent\n # missing data properly\n mask = mask | pd.isnull(data)\n\n return mask\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 83, "name": "equals", "kind": "ref", "category": "function", "info": " if not mask.index.equals(data.index) \\\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 84, "name": "equals", "kind": "ref", "category": "function", "info": " and mask.columns.equals(data.columns):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 96, "name": "_HeatMapper", "kind": "def", "category": "class", "info": "__init__\t_determine_cmap_params\t_annotate_heatmap\t_skip_ticks\t_auto_ticks\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 112, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": " mask = _matrix_mask(data, mask)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 114, "name": "masked_where", "kind": "ref", "category": "function", "info": " plot_data = np.ma.masked_where(np.asarray(mask), plot_data)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 120, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 122, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 129, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 131, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 140, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " self.xticklabels = _index_to_ticklabels(data.columns)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 142, "name": "_skip_ticks", "kind": "ref", "category": 
"function", "info": " self.xticks, self.xticklabels = self._skip_ticks(xticklabels,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 150, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " self.yticklabels = _index_to_ticklabels(data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 152, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " self.yticks, self.yticklabels = self._skip_ticks(yticklabels,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 156, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " xlabel = _index_to_label(data.columns)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 157, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " ylabel = _index_to_label(data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 162, "name": "_determine_cmap_params", "kind": "ref", "category": "function", "info": " self._determine_cmap_params(plot_data, vmin, vmax,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 191, "name": "_determine_cmap_params", "kind": "def", "category": "function", "info": " def _determine_cmap_params(self, plot_data, vmin, vmax,\n cmap, center, robust):\n \"\"\"Use some heuristics to set good defaults for colorbar and range.\"\"\"\n\n # plot_data is a np.ma.array instance\n calc_data = plot_data.astype(float).filled(np.nan)\n if vmin is None:\n if robust:\n vmin = np.nanpercentile(calc_data, 2)\n else:\n vmin = np.nanmin(calc_data)\n if vmax is None:\n if robust:\n vmax = np.nanpercentile(calc_data, 98)\n else:\n vmax = np.nanmax(calc_data)\n self.vmin, self.vmax = vmin, vmax\n\n # Choose default colormaps if not provided\n if cmap is None:\n if center is None:\n self.cmap = cm.rocket\n else:\n self.cmap = cm.icefire\n elif isinstance(cmap, str):\n self.cmap = get_colormap(cmap)\n elif isinstance(cmap, list):\n self.cmap = mpl.colors.ListedColormap(cmap)\n else:\n self.cmap = cmap\n\n # Recenter a divergent colormap\n if center is not None:\n\n # Copy bad values\n # in mpl<3.2 only masked values are honored with \"bad\" color spec\n # (see https://github.com/matplotlib/matplotlib/pull/14257)\n bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n\n # under/over values are set for sure when cmap extremes\n # do not map to the same color as +-inf\n under = self.cmap(-np.inf)\n over = self.cmap(np.inf)\n under_set = under != self.cmap(0)\n over_set = over != self.cmap(self.cmap.N - 1)\n\n vrange = max(vmax - center, center - vmin)\n normlize = mpl.colors.Normalize(center - vrange, center + vrange)\n cmin, cmax = normlize([vmin, vmax])\n cc = np.linspace(cmin, cmax, 256)\n self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n self.cmap.set_bad(bad)\n if under_set:\n self.cmap.set_under(under)\n if over_set:\n self.cmap.set_over(over)\n\n def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, 
np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if kws.get(\"norm\") is None:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 196, "name": 
"astype", "kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 196, "name": "filled", "kind": "ref", "category": "function", "info": " calc_data = plot_data.astype(float).filled(np.nan)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 216, "name": "get_colormap", "kind": "ref", "category": "function", "info": " self.cmap = get_colormap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 218, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 228, "name": "cmap", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 228, "name": "masked_invalid", "kind": "ref", "category": "function", "info": " bad = self.cmap(np.ma.masked_invalid([np.nan]))[0]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 232, "name": "cmap", "kind": "ref", "category": "function", "info": " under = self.cmap(-np.inf)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 233, "name": "cmap", "kind": "ref", "category": "function", "info": " over = self.cmap(np.inf)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 234, "name": "cmap", "kind": "ref", "category": "function", "info": " under_set = under != self.cmap(0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 235, "name": "cmap", "kind": "ref", "category": "function", "info": " over_set = over != self.cmap(self.cmap.N - 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 239, "name": "normlize", "kind": "ref", "category": "function", "info": " cmin, cmax = normlize([vmin, vmax])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 241, "name": "cmap", "kind": "ref", "category": "function", "info": " self.cmap = mpl.colors.ListedColormap(self.cmap(cc))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 242, "name": "set_bad", "kind": "ref", "category": "function", "info": " self.cmap.set_bad(bad)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 244, "name": "set_under", "kind": "ref", "category": "function", "info": " 
self.cmap.set_under(under)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 246, "name": "set_over", "kind": "ref", "category": "function", "info": " self.cmap.set_over(over)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 248, "name": "_annotate_heatmap", "kind": "def", "category": "function", "info": " def _annotate_heatmap(self, ax, mesh):\n \"\"\"Add textual labels with the value in each cell.\"\"\"\n mesh.update_scalarmappable()\n height, width = self.annot_data.shape\n xpos, ypos = np.meshgrid(np.arange(width) + .5, np.arange(height) + .5)\n for x, y, m, color, val in zip(xpos.flat, ypos.flat,\n mesh.get_array(), mesh.get_facecolors(),\n self.annot_data.flat):\n if m is not np.ma.masked:\n lum = relative_luminance(color)\n text_color = \".15\" if lum > .408 else \"w\"\n annotation = (\"{:\" + self.fmt + \"}\").format(val)\n text_kwargs = dict(color=text_color, ha=\"center\", va=\"center\")\n text_kwargs.update(self.annot_kws)\n ax.text(x, y, annotation, **text_kwargs)\n\n def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if kws.get(\"norm\") is None:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, 
yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 257, "name": "relative_luminance", "kind": "ref", "category": "function", "info": " lum = relative_luminance(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 264, "name": "_skip_ticks", "kind": "def", "category": "function", "info": " def _skip_ticks(self, labels, tickevery):\n \"\"\"Return ticks and labels at evenly spaced intervals.\"\"\"\n n = len(labels)\n if tickevery == 0:\n ticks, labels = [], []\n elif tickevery == 1:\n ticks, labels = np.arange(n) + .5, labels\n else:\n start, end, step = 0, n, tickevery\n ticks = np.arange(start, end, step) + .5\n labels = labels[start:end:step]\n return ticks, labels\n\n def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if kws.get(\"norm\") is None:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # 
GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 277, "name": "_auto_ticks", "kind": "def", "category": "function", "info": " def _auto_ticks(self, ax, labels, axis):\n \"\"\"Determine ticks and ticklabels that minimize overlap.\"\"\"\n transform = ax.figure.dpi_scale_trans.inverted()\n bbox = ax.get_window_extent().transformed(transform)\n size = [bbox.width, bbox.height][axis]\n axis = [ax.xaxis, ax.yaxis][axis]\n tick, = axis.set_ticks([0])\n fontsize = tick.label1.get_size()\n max_ticks = int(size // (fontsize / 72))\n if max_ticks < 1:\n return [], []\n tick_every = len(labels) // max_ticks + 1\n tick_every = 1 if tick_every == 0 else tick_every\n ticks, labels = self._skip_ticks(labels, tick_every)\n return ticks, labels\n\n def plot(self, ax, cax, kws):\n \"\"\"Draw the heatmap on the provided Axes.\"\"\"\n # Remove all the Axes spines\n despine(ax=ax, left=True, bottom=True)\n\n # setting vmin/vmax in addition to norm is deprecated\n # so avoid setting if norm is set\n if kws.get(\"norm\") is None:\n kws.setdefault(\"vmin\", self.vmin)\n kws.setdefault(\"vmax\", self.vmax)\n\n # Draw the heatmap\n mesh = ax.pcolormesh(self.plot_data, cmap=self.cmap, **kws)\n\n # Set the axis limits\n ax.set(xlim=(0, self.data.shape[1]), ylim=(0, self.data.shape[0]))\n\n # Invert the y axis to show the plot in matrix form\n ax.invert_yaxis()\n\n # Possibly add a colorbar\n if self.cbar:\n cb = ax.figure.colorbar(mesh, cax, ax, **self.cbar_kws)\n cb.outline.set_linewidth(0)\n # If rasterized is passed to pcolormesh, also rasterize the\n # colorbar to avoid white lines on the PDF rendering\n if kws.get('rasterized', False):\n cb.solids.set_rasterized(True)\n\n # Add row and column labels\n if isinstance(self.xticks, str) and self.xticks == \"auto\":\n xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n else:\n xticks, xticklabels = self.xticks, self.xticklabels\n\n if isinstance(self.yticks, str) and self.yticks == \"auto\":\n yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n else:\n yticks, yticklabels = self.yticks, self.yticklabels\n\n ax.set(xticks=xticks, yticks=yticks)\n xtl = ax.set_xticklabels(xticklabels)\n ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n plt.setp(ytl, va=\"center\") # GH2484\n\n # Possibly rotate them if they overlap\n _draw_figure(ax.figure)\n\n if axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n if axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n\n # Add the axis labels\n ax.set(xlabel=self.xlabel, ylabel=self.ylabel)\n\n # Annotate the cells with the formatted values\n if self.annot:\n self._annotate_heatmap(ax, mesh)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 279, "name": "inverted", "kind": "ref", "category": "function", "info": " transform = ax.figure.dpi_scale_trans.inverted()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", 
"line": 280, "name": "transformed", "kind": "ref", "category": "function", "info": " bbox = ax.get_window_extent().transformed(transform)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 283, "name": "set_ticks", "kind": "ref", "category": "function", "info": " tick, = axis.set_ticks([0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 284, "name": "get_size", "kind": "ref", "category": "function", "info": " fontsize = tick.label1.get_size()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 290, "name": "_skip_ticks", "kind": "ref", "category": "function", "info": " ticks, labels = self._skip_ticks(labels, tick_every)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 296, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 311, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 324, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " xticks, xticklabels = self._auto_ticks(ax, self.xticklabels, 0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 329, "name": "_auto_ticks", "kind": "ref", "category": "function", "info": " yticks, yticklabels = self._auto_ticks(ax, self.yticklabels, 1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 334, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(xticklabels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 335, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(yticklabels, rotation=\"vertical\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 339, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 341, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 343, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 351, "name": "_annotate_heatmap", "kind": "ref", "category": "function", "info": " self._annotate_heatmap(ax, mesh)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": 
"seaborn/matrix.py", "line": 354, "name": "heatmap", "kind": "def", "category": "function", "info": "def heatmap(\n data, *,\n vmin=None, vmax=None, cmap=None, center=None, robust=False,\n annot=None, fmt=\".2g\", annot_kws=None,\n linewidths=0, linecolor=\"white\",\n cbar=True, cbar_kws=None, cbar_ax=None,\n square=False, xticklabels=\"auto\", yticklabels=\"auto\",\n mask=None, ax=None,\n **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 445, "name": "_HeatMapper", "kind": "ref", "category": "function", "info": " plotter = _HeatMapper(data, vmin, vmax, cmap, center, robust, annot, fmt,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 457, "name": "set_aspect", "kind": "ref", "category": "function", "info": " ax.set_aspect(\"equal\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 462, "name": "_DendrogramPlotter", "kind": "def", "category": "class", "info": "__init__\t_calculate_linkage_scipy\t_calculate_linkage_fastcluster\tcalculated_linkage\tcalculate_dendrogram\treordered_ind\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 497, "name": "calculate_dendrogram", "kind": "ref", "category": "function", "info": " self.dendrogram = self.calculate_dendrogram()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 503, "name": "_index_to_ticklabels", "kind": "ref", "category": "function", "info": " ticklabels = _index_to_ticklabels(self.data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 511, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.ylabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 519, "name": "_index_to_label", "kind": "ref", "category": "function", "info": " self.xlabel = _index_to_label(self.data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 528, "name": "_calculate_linkage_scipy", "kind": "def", "category": "function", "info": " def _calculate_linkage_scipy(self):\n linkage = hierarchy.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.prod(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 533, "name": "_calculate_linkage_fastcluster", "kind": "def", "category": "function", "info": " def _calculate_linkage_fastcluster(self):\n import fastcluster\n # Fastcluster has a memory-saving vectorized version, but only\n # with certain linkage methods, and mostly with euclidean metric\n # vector_methods = ('single', 'centroid', 'median', 'ward')\n euclidean_methods = ('centroid', 'median', 'ward')\n euclidean = self.metric == 'euclidean' and self.method in \\\n euclidean_methods\n if euclidean or self.method == 'single':\n return fastcluster.linkage_vector(self.array,\n method=self.method,\n metric=self.metric)\n else:\n linkage = fastcluster.linkage(self.array, method=self.method,\n metric=self.metric)\n return linkage\n\n @property\n def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except 
ImportError:\n if np.prod(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 542, "name": "linkage_vector", "kind": "ref", "category": "function", "info": " return fastcluster.linkage_vector(self.array,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 551, "name": "calculated_linkage", "kind": "def", "category": "function", "info": " def calculated_linkage(self):\n\n try:\n return self._calculate_linkage_fastcluster()\n except ImportError:\n if np.prod(self.shape) >= 10000:\n msg = (\"Clustering large matrix with scipy. 
Installing \"\n \"`fastcluster` may give better performance.\")\n warnings.warn(msg)\n\n return self._calculate_linkage_scipy()\n\n def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n -------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 554, "name": "_calculate_linkage_fastcluster", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_fastcluster()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 561, "name": "_calculate_linkage_scipy", "kind": "ref", "category": "function", "info": " return self._calculate_linkage_scipy()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 563, "name": "calculate_dendrogram", "kind": "def", "category": "function", "info": " def calculate_dendrogram(self):\n \"\"\"Calculates a dendrogram based on the linkage matrix\n\n Made a separate function, not a property because don't want to\n recalculate the dendrogram every time it is accessed.\n\n Returns\n 
-------\n dendrogram : dict\n Dendrogram dictionary as returned by scipy.cluster.hierarchy\n .dendrogram. The important key-value pairing is\n \"reordered_ind\" which indicates the re-ordering of the matrix\n \"\"\"\n return hierarchy.dendrogram(self.linkage, no_plot=True,\n color_threshold=-np.inf)\n\n @property\n def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 580, "name": "reordered_ind", "kind": "def", "category": "function", "info": " def reordered_ind(self):\n \"\"\"Indices of the matrix, reordered by the dendrogram\"\"\"\n return self.dendrogram['leaves']\n\n def plot(self, ax, tree_kws):\n \"\"\"Plots a dendrogram of the similarities between data on the axes\n\n Parameters\n ----------\n ax : matplotlib.axes.Axes\n Axes object upon which the dendrogram is plotted\n\n \"\"\"\n tree_kws = {} if tree_kws is None else tree_kws.copy()\n tree_kws.setdefault(\"linewidths\", .5)\n tree_kws.setdefault(\"colors\", tree_kws.pop(\"color\", (.2, .2, .2)))\n\n if self.rotate and self.axis == 0:\n coords = zip(self.dependent_coord, self.independent_coord)\n else:\n coords = zip(self.independent_coord, self.dependent_coord)\n lines = LineCollection([list(zip(x, y)) for x, y in coords],\n **tree_kws)\n\n ax.add_collection(lines)\n number_of_leaves = len(self.reordered_ind)\n max_dependent_coord = max(map(max, self.dependent_coord))\n\n if self.rotate:\n ax.yaxis.set_ticks_position('right')\n\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_ylim(0, number_of_leaves * 10)\n ax.set_xlim(0, max_dependent_coord * 1.05)\n\n 
ax.invert_xaxis()\n ax.invert_yaxis()\n else:\n # Constants 10 and 1.05 come from\n # `scipy.cluster.hierarchy._plot_dendrogram`\n ax.set_xlim(0, number_of_leaves * 10)\n ax.set_ylim(0, max_dependent_coord * 1.05)\n\n despine(ax=ax, bottom=True, left=True)\n\n ax.set(xticks=self.xticks, yticks=self.yticks,\n xlabel=self.xlabel, ylabel=self.ylabel)\n xtl = ax.set_xticklabels(self.xticklabels)\n ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n\n # Force a draw of the plot to avoid matplotlib window error\n _draw_figure(ax.figure)\n\n if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n plt.setp(ytl, rotation=\"horizontal\")\n if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n plt.setp(xtl, rotation=\"vertical\")\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 604, "name": "add_collection", "kind": "ref", "category": "function", "info": " ax.add_collection(lines)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 609, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 613, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, number_of_leaves * 10)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 614, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 616, "name": "invert_xaxis", "kind": "ref", "category": "function", "info": " ax.invert_xaxis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 617, "name": "invert_yaxis", "kind": "ref", "category": "function", "info": " ax.invert_yaxis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 621, "name": "set_xlim", "kind": "ref", "category": "function", "info": " ax.set_xlim(0, number_of_leaves * 10)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 622, "name": "set_ylim", "kind": "ref", "category": "function", "info": " ax.set_ylim(0, max_dependent_coord * 1.05)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 624, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=ax, bottom=True, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 628, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " xtl = ax.set_xticklabels(self.xticklabels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 629, "name": "set_yticklabels", "kind": "ref", "category": "function", "info": " ytl = ax.set_yticklabels(self.yticklabels, rotation='vertical')\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 632, "name": "_draw_figure", "kind": "ref", "category": "function", "info": " _draw_figure(ax.figure)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 634, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(ytl) > 0 and axis_ticklabels_overlap(ytl):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 636, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " if len(xtl) > 0 and axis_ticklabels_overlap(xtl):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 686, "name": "_DendrogramPlotter", "kind": "ref", "category": "function", "info": " plotter = _DendrogramPlotter(data, linkage=linkage, axis=axis,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 695, "name": "ClusterGrid", "kind": "def", "category": "class", "info": "__init__\t_preprocess_colors\tformat_data\tz_score\tstandard_scale\tdim_ratios\tcolor_list_to_matrix_and_cmap\tplot_dendrograms\tplot_colors\tplot_matrix\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 709, "name": "format_data", "kind": "ref", "category": "function", "info": " self.data2d = self.format_data(self.data, pivot_kws, z_score,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 712, "name": "_matrix_mask", "kind": "ref", "category": "function", "info": " self.mask = _matrix_mask(self.data2d, mask)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 717, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, row_colors, axis=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 719, "name": "_preprocess_colors", "kind": "ref", "category": "function", "info": " self._preprocess_colors(data, col_colors, axis=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 731, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " width_ratios = self.dim_ratios(self.row_colors,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 734, "name": "dim_ratios", "kind": "ref", "category": "function", "info": " height_ratios = self.dim_ratios(self.col_colors,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 745, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram = self._figure.add_subplot(self.gs[-1, 0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 746, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram = self._figure.add_subplot(self.gs[0, -1])\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 747, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 748, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 754, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_row_colors = self._figure.add_subplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 757, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_col_colors = self._figure.add_subplot(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 760, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_heatmap = self._figure.add_subplot(self.gs[-1, -1])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 766, "name": "add_subplot", "kind": "ref", "category": "function", "info": " self.ax_cbar = self._figure.add_subplot(self.gs[0, 0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 773, "name": "_preprocess_colors", "kind": "def", "category": "function", "info": " def _preprocess_colors(self, data, colors, axis):\n \"\"\"Preprocess {row/col}_colors to extract labels and convert colors.\"\"\"\n labels = None\n\n if colors is not None:\n if isinstance(colors, (pd.DataFrame, pd.Series)):\n\n # If data is unindexed, raise\n if (not hasattr(data, \"index\") and axis == 0) or (\n not hasattr(data, \"columns\") and axis == 1\n ):\n axis_name = \"col\" if axis else \"row\"\n msg = (f\"{axis_name}_colors indices can't be matched with data \"\n f\"indices. Provide {axis_name}_colors as a non-indexed \"\n \"datatype, e.g. 
by using `.to_numpy()``\")\n                    raise TypeError(msg)\n\n                # Ensure colors match data indices\n                if axis == 0:\n                    colors = colors.reindex(data.index)\n                else:\n                    colors = colors.reindex(data.columns)\n\n                # Replace na's with white color\n                # TODO We should set these to transparent instead\n                colors = colors.astype(object).fillna('white')\n\n                # Extract color values and labels from frame/series\n                if isinstance(colors, pd.DataFrame):\n                    labels = list(colors.columns)\n                    colors = colors.T.values\n                else:\n                    if colors.name is None:\n                        labels = [\"\"]\n                    else:\n                        labels = [colors.name]\n                    colors = colors.values\n\n            colors = _convert_colors(colors)\n\n        return colors, labels\n\n    def format_data(self, data, pivot_kws, z_score=None,\n                    standard_scale=None):\n        \"\"\"Extract variables from data or use directly.\"\"\"\n\n        # Either the data is already in 2d matrix format, or need to do a pivot\n        if pivot_kws is not None:\n            data2d = data.pivot(**pivot_kws)\n        else:\n            data2d = data\n\n        if z_score is not None and standard_scale is not None:\n            raise ValueError(\n                'Cannot perform both z-scoring and standard-scaling on data')\n\n        if z_score is not None:\n            data2d = self.z_score(data2d, z_score)\n        if standard_scale is not None:\n            data2d = self.standard_scale(data2d, standard_scale)\n        return data2d\n\n    @staticmethod\n    def z_score(data2d, axis=1):\n        \"\"\"Standardize the mean and variance of the data axis\n\n        Parameters\n        ----------\n        data2d : pandas.DataFrame\n            Data to normalize\n        axis : int\n            Which axis to normalize across. If 0, normalize across rows, if 1,\n            normalize across columns.\n\n        Returns\n        -------\n        normalized : pandas.DataFrame\n            Normalized data with a mean of 0 and variance of 1 across the\n            specified axis.\n        \"\"\"\n        if axis == 1:\n            z_scored = data2d\n        else:\n            z_scored = data2d.T\n\n        z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n        if axis == 1:\n            return z_scored\n        else:\n            return z_scored.T\n\n    @staticmethod\n    def standard_scale(data2d, axis=1):\n        \"\"\"Divide the data by the difference between the max and min\n\n        Parameters\n        ----------\n        data2d : pandas.DataFrame\n            Data to normalize\n        axis : int\n            Which axis to normalize across. 
If 0, normalize across rows, if 1,\n            normalize across columns.\n\n        Returns\n        -------\n        standardized : pandas.DataFrame\n            Standardized data with a minimum of 0 and maximum of 1 across the\n            specified axis.\n\n        \"\"\"\n        # Normalize these values to range from 0 to 1\n        if axis == 1:\n            standardized = data2d\n        else:\n            standardized = data2d.T\n\n        subtract = standardized.min()\n        standardized = (standardized - subtract) / (\n            standardized.max() - standardized.min())\n\n        if axis == 1:\n            return standardized\n        else:\n            return standardized.T\n\n    def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n        \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n        ratios = [dendrogram_ratio]\n\n        if colors is not None:\n            # Colors are encoded as rgb, so there is an extra dimension\n            if np.ndim(colors) > 2:\n                n_colors = len(colors)\n            else:\n                n_colors = 1\n\n            ratios += [n_colors * colors_ratio]\n\n        # Add the ratio for the heatmap itself\n        ratios.append(1 - sum(ratios))\n\n        return ratios\n\n    @staticmethod\n    def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n        \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n        These arguments can now be plotted using heatmap(matrix, cmap)\n        and the provided colors will be plotted.\n\n        Parameters\n        ----------\n        colors : list of matplotlib colors\n            Colors to label the rows or columns of a dataframe.\n        ind : list of ints\n            Ordering of the rows or columns, to reorder the original colors\n            by the clustered dendrogram order\n        axis : int\n            Which axis this is labeling\n\n        Returns\n        -------\n        matrix : numpy.array\n            A numpy array of integer values, where each indexes into the cmap\n        cmap : matplotlib.colors.ListedColormap\n\n        \"\"\"\n        try:\n            mpl.colors.to_rgb(colors[0])\n        except ValueError:\n            # We have a 2D color structure\n            m, n = len(colors), len(colors[0])\n            if not all(len(c) == n for c in colors[1:]):\n                raise ValueError(\"Multiple side color vectors must have same size\")\n        else:\n            # We have one vector of colors\n            m, n = 1, len(colors)\n            colors = [colors]\n\n        # Map from unique colors to colormap index value\n        unique_colors = {}\n        matrix = np.zeros((m, n), int)\n        for i, inner in enumerate(colors):\n            for j, color in enumerate(inner):\n                idx = unique_colors.setdefault(color, len(unique_colors))\n                matrix[i, j] = idx\n\n        # Reorder for clustering and transpose for axis\n        matrix = matrix[:, ind]\n        if axis == 0:\n            matrix = matrix.T\n\n        cmap = mpl.colors.ListedColormap(list(unique_colors))\n        return matrix, cmap\n\n    def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n                         row_linkage, col_linkage, tree_kws):\n        # Plot the row dendrogram\n        if row_cluster:\n            self.dendrogram_row = dendrogram(\n                self.data2d, metric=metric, method=method, label=False, axis=0,\n                ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n                tree_kws=tree_kws\n            )\n        else:\n            self.ax_row_dendrogram.set_xticks([])\n            self.ax_row_dendrogram.set_yticks([])\n        # Plot the column dendrogram\n        if col_cluster:\n            self.dendrogram_col = dendrogram(\n                self.data2d, metric=metric, method=method, label=False,\n                axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n                tree_kws=tree_kws\n            )\n        else:\n            self.ax_col_dendrogram.set_xticks([])\n            self.ax_col_dendrogram.set_yticks([])\n        despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n        despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n    def plot_colors(self, xind, yind, **kws):\n        \"\"\"Plots color labels between the dendrogram and the heatmap\n\n        Parameters\n        ----------\n        heatmap_kws : dict\n            Keyword arguments passed to 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 792, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.index)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 794, "name": "reindex", "kind": "ref", "category": "function", "info": " colors = colors.reindex(data.columns)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 798, "name": "astype", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 798, "name": "fillna", "kind": "ref", "category": "function", "info": " colors = colors.astype(object).fillna('white')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 811, "name": "_convert_colors", "kind": "ref", "category": "function", "info": " colors = _convert_colors(colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 815, "name": "format_data", "kind": "def", "category": "function", "info": " def format_data(self, data, pivot_kws, z_score=None,\n standard_scale=None):\n \"\"\"Extract variables from data or use directly.\"\"\"\n\n # Either the data is already in 2d matrix format, or need to do a pivot\n if pivot_kws is not None:\n data2d = data.pivot(**pivot_kws)\n else:\n data2d = data\n\n if z_score is not None and standard_scale is not None:\n raise ValueError(\n 'Cannot perform both z-scoring and standard-scaling on data')\n\n if z_score is not None:\n data2d = self.z_score(data2d, z_score)\n if standard_scale is not None:\n data2d = self.standard_scale(data2d, standard_scale)\n return data2d\n\n @staticmethod\n def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n 
self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, 
**kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 830, "name": "z_score", "kind": "ref", "category": "function", "info": " data2d = self.z_score(data2d, z_score)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 832, "name": "standard_scale", "kind": "ref", "category": "function", "info": " data2d = self.standard_scale(data2d, standard_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 836, "name": "z_score", "kind": "def", "category": "function", "info": " def z_score(data2d, axis=1):\n \"\"\"Standarize the mean and variance of the data axis\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n normalized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n \"\"\"\n if axis == 1:\n z_scored = data2d\n else:\n z_scored = data2d.T\n\n z_scored = (z_scored - z_scored.mean()) / z_scored.std()\n\n if axis == 1:\n return z_scored\n else:\n return z_scored.T\n\n @staticmethod\n def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. 
If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments 
heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n 
self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 866, "name": "standard_scale", "kind": "def", "category": "function", "info": " def standard_scale(data2d, axis=1):\n \"\"\"Divide the data by the difference between the max and min\n\n Parameters\n ----------\n data2d : pandas.DataFrame\n Data to normalize\n axis : int\n Which axis to normalize across. If 0, normalize across rows, if 1,\n normalize across columns.\n\n Returns\n -------\n standardized : pandas.DataFrame\n Noramlized data with a mean of 0 and variance of 1 across the\n specified axis.\n\n \"\"\"\n # Normalize these values to range from 0 to 1\n if axis == 1:\n standardized = data2d\n else:\n standardized = data2d.T\n\n subtract = standardized.min()\n standardized = (standardized - subtract) / (\n standardized.max() - standardized.min())\n\n if axis == 1:\n return standardized\n else:\n return standardized.T\n\n def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n 
matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", 
\"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 899, "name": "dim_ratios", "kind": "def", "category": "function", "info": " def dim_ratios(self, colors, dendrogram_ratio, colors_ratio):\n \"\"\"Get the proportions of the figure taken up by each axes.\"\"\"\n ratios = [dendrogram_ratio]\n\n if colors is not None:\n # Colors are encoded as rgb, so there is an extra dimension\n if np.ndim(colors) > 2:\n n_colors = len(colors)\n else:\n n_colors = 1\n\n ratios += [n_colors * colors_ratio]\n\n # Add the ratio for the heatmap itself\n ratios.append(1 - sum(ratios))\n\n return ratios\n\n @staticmethod\n def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the 
original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n 
xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 918, "name": "color_list_to_matrix_and_cmap", "kind": "def", "category": "function", "info": " def color_list_to_matrix_and_cmap(colors, ind, axis=0):\n \"\"\"Turns a list of colors into a numpy matrix and matplotlib colormap\n\n These arguments can now be plotted using heatmap(matrix, cmap)\n and the provided colors 
will be plotted.\n\n Parameters\n ----------\n colors : list of matplotlib colors\n Colors to label the rows or columns of a dataframe.\n ind : list of ints\n Ordering of the rows or columns, to reorder the original colors\n by the clustered dendrogram order\n axis : int\n Which axis this is labeling\n\n Returns\n -------\n matrix : numpy.array\n A numpy array of integer values, where each indexes into the cmap\n cmap : matplotlib.colors.ListedColormap\n\n \"\"\"\n try:\n mpl.colors.to_rgb(colors[0])\n except ValueError:\n # We have a 2D color structure\n m, n = len(colors), len(colors[0])\n if not all(len(c) == n for c in colors[1:]):\n raise ValueError(\"Multiple side color vectors must have same size\")\n else:\n # We have one vector of colors\n m, n = 1, len(colors)\n colors = [colors]\n\n # Map from unique colors to colormap index value\n unique_colors = {}\n matrix = np.zeros((m, n), int)\n for i, inner in enumerate(colors):\n for j, color in enumerate(inner):\n idx = unique_colors.setdefault(color, len(unique_colors))\n matrix[i, j] = idx\n\n # Reorder for clustering and transpose for axis\n matrix = matrix[:, ind]\n if axis == 0:\n matrix = matrix.T\n\n cmap = mpl.colors.ListedColormap(list(unique_colors))\n return matrix, cmap\n\n def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get 
col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 942, "name": "to_rgb", "kind": "ref", "category": "function", "info": " mpl.colors.to_rgb(colors[0])\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 966, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(list(unique_colors))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 969, "name": "plot_dendrograms", "kind": "def", "category": "function", "info": " def plot_dendrograms(self, row_cluster, col_cluster, metric, method,\n row_linkage, col_linkage, tree_kws):\n # Plot the row dendrogram\n if row_cluster:\n self.dendrogram_row = dendrogram(\n self.data2d, metric=metric, method=method, label=False, axis=0,\n ax=self.ax_row_dendrogram, rotate=True, linkage=row_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_row_dendrogram.set_xticks([])\n self.ax_row_dendrogram.set_yticks([])\n # PLot the column dendrogram\n if col_cluster:\n self.dendrogram_col = dendrogram(\n self.data2d, metric=metric, method=method, label=False,\n axis=1, ax=self.ax_col_dendrogram, linkage=col_linkage,\n tree_kws=tree_kws\n )\n else:\n self.ax_col_dendrogram.set_xticks([])\n self.ax_col_dendrogram.set_yticks([])\n despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n\n def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = 
np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 979, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_xticks([])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 980, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_row_dendrogram.set_yticks([])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 989, "name": "set_xticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_xticks([])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 990, "name": "set_yticks", "kind": "ref", "category": "function", "info": " self.ax_col_dendrogram.set_yticks([])\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 991, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_row_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 992, "name": "despine", "kind": "ref", "category": "function", "info": " despine(ax=self.ax_col_dendrogram, bottom=True, left=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 994, "name": "plot_colors", "kind": "def", "category": "function", "info": " def plot_colors(self, xind, yind, **kws):\n \"\"\"Plots color labels between the dendrogram and the heatmap\n\n Parameters\n ----------\n heatmap_kws : dict\n Keyword arguments heatmap\n\n \"\"\"\n # Remove any custom colormap and centering\n # TODO this code has consistently caused problems when we\n # have missed kwargs that need to be excluded that it might\n # be better to rewrite *in*clusively.\n kws = kws.copy()\n kws.pop('cmap', None)\n kws.pop('norm', None)\n kws.pop('center', None)\n kws.pop('annot', None)\n kws.pop('vmin', None)\n kws.pop('vmax', None)\n kws.pop('robust', None)\n kws.pop('xticklabels', None)\n kws.pop('yticklabels', None)\n\n # Plot the row colors\n if self.row_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.row_colors, yind, axis=0)\n\n # Get row_color labels\n if self.row_color_labels is not None:\n row_color_labels = self.row_color_labels\n else:\n row_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n xticklabels=row_color_labels, yticklabels=False, **kws)\n\n # Adjust rotation of labels\n if row_color_labels is not False:\n plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n else:\n despine(self.ax_row_colors, left=True, bottom=True)\n\n # Plot the column colors\n if self.col_colors is not None:\n matrix, cmap = self.color_list_to_matrix_and_cmap(\n self.col_colors, xind, axis=1)\n\n # Get col_color labels\n if self.col_color_labels is not None:\n col_color_labels = self.col_color_labels\n else:\n col_color_labels = False\n\n heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n xticklabels=False, yticklabels=col_color_labels, **kws)\n\n # Adjust rotation of labels, place on right side\n if col_color_labels is not False:\n self.ax_col_colors.yaxis.tick_right()\n plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n else:\n despine(self.ax_col_colors, left=True, bottom=True)\n\n def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting 
ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1020, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1029, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_row_colors,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1034, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_row_colors.get_xticklabels(), rotation=90)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1036, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_row_colors, left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1040, "name": "color_list_to_matrix_and_cmap", "kind": "ref", "category": "function", "info": " matrix, cmap = self.color_list_to_matrix_and_cmap(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1049, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(matrix, cmap=cmap, cbar=False, ax=self.ax_col_colors,\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1054, "name": "tick_right", "kind": "ref", "category": "function", "info": " self.ax_col_colors.yaxis.tick_right()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1055, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " plt.setp(self.ax_col_colors.get_yticklabels(), rotation=0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1057, "name": "despine", "kind": "ref", "category": "function", "info": " despine(self.ax_col_colors, left=True, bottom=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1059, "name": "plot_matrix", "kind": "def", "category": "function", "info": " def plot_matrix(self, colorbar_kws, xind, yind, **kws):\n self.data2d = self.data2d.iloc[yind, xind]\n self.mask = self.mask.iloc[yind, xind]\n\n # Try to reorganize specified tick labels, if provided\n xtl = kws.pop(\"xticklabels\", \"auto\")\n try:\n xtl = np.asarray(xtl)[xind]\n except (TypeError, IndexError):\n pass\n ytl = kws.pop(\"yticklabels\", \"auto\")\n try:\n ytl = np.asarray(ytl)[yind]\n except (TypeError, IndexError):\n pass\n\n # Reorganize the annotations to match the heatmap\n annot = kws.pop(\"annot\", None)\n if annot is None or annot is False:\n pass\n else:\n if isinstance(annot, bool):\n annot_data = self.data2d\n else:\n annot_data = np.asarray(annot)\n if annot_data.shape != self.data2d.shape:\n err = \"`data` and `annot` must have same shape.\"\n raise ValueError(err)\n annot_data = annot_data[yind][:, xind]\n annot = annot_data\n\n # Setting ax_cbar=None in clustermap call implies no colorbar\n kws.setdefault(\"cbar\", self.ax_cbar is not None)\n heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n cbar_kws=colorbar_kws, mask=self.mask,\n xticklabels=xtl, yticklabels=ytl, annot=annot, **kws)\n\n ytl = self.ax_heatmap.get_yticklabels()\n ytl_rot = None if not ytl else ytl[0].get_rotation()\n self.ax_heatmap.yaxis.set_ticks_position('right')\n self.ax_heatmap.yaxis.set_label_position('right')\n if ytl_rot is not None:\n ytl = self.ax_heatmap.get_yticklabels()\n plt.setp(ytl, rotation=ytl_rot)\n\n tight_params = dict(h_pad=.02, w_pad=.02)\n if self.ax_cbar is None:\n self._figure.tight_layout(**tight_params)\n else:\n # Turn the colorbar axes off for tight layout so that its\n # ticks don't interfere with the rest of the plot layout.\n # Then move it.\n self.ax_cbar.set_axis_off()\n self._figure.tight_layout(**tight_params)\n self.ax_cbar.set_axis_on()\n self.ax_cbar.set_position(self.cbar_pos)\n\n def plot(self, metric, method, colorbar_kws, row_cluster, col_cluster,\n row_linkage, col_linkage, tree_kws, **kws):\n\n # heatmap square=True sets the aspect ratio on the axes, but that is\n # not compatible with the multi-axes layout of clustergrid\n if kws.get(\"square\", False):\n msg = \"``square=True`` ignored in clustermap\"\n warnings.warn(msg)\n kws.pop(\"square\")\n\n colorbar_kws = {} if colorbar_kws is None else colorbar_kws\n\n self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n row_linkage=row_linkage, col_linkage=col_linkage,\n tree_kws=tree_kws)\n try:\n xind = self.dendrogram_col.reordered_ind\n except AttributeError:\n xind = 
np.arange(self.data2d.shape[1])\n try:\n yind = self.dendrogram_row.reordered_ind\n except AttributeError:\n yind = np.arange(self.data2d.shape[0])\n\n self.plot_colors(xind, yind, **kws)\n self.plot_matrix(colorbar_kws, xind, yind, **kws)\n return self\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1092, "name": "heatmap", "kind": "ref", "category": "function", "info": " heatmap(self.data2d, ax=self.ax_heatmap, cbar_ax=self.ax_cbar,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1096, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1097, "name": "get_rotation", "kind": "ref", "category": "function", "info": " ytl_rot = None if not ytl else ytl[0].get_rotation()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1098, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_ticks_position('right')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1099, "name": "set_label_position", "kind": "ref", "category": "function", "info": " self.ax_heatmap.yaxis.set_label_position('right')\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1101, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " ytl = self.ax_heatmap.get_yticklabels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1111, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1113, "name": "set_axis_on", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_axis_on()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1114, "name": "set_position", "kind": "ref", "category": "function", "info": " self.ax_cbar.set_position(self.cbar_pos)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1128, "name": "plot_dendrograms", "kind": "ref", "category": "function", "info": " self.plot_dendrograms(row_cluster, col_cluster, metric, method,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1140, "name": "plot_colors", "kind": "ref", "category": "function", "info": " self.plot_colors(xind, yind, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1141, "name": "plot_matrix", "kind": "ref", "category": "function", "info": " self.plot_matrix(colorbar_kws, xind, yind, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1145, "name": "clustermap", "kind": "def", 
"category": "function", "info": "def clustermap(\n data, *,\n pivot_kws=None, method='average', metric='euclidean',\n z_score=None, standard_scale=None, figsize=(10, 10),\n cbar_kws=None, row_cluster=True, col_cluster=True,\n row_linkage=None, col_linkage=None,\n row_colors=None, col_colors=None, mask=None,\n dendrogram_ratio=.2, colors_ratio=0.03,\n cbar_pos=(.02, .8, .05, .18), tree_kws=None,\n **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/matrix.py", "rel_fname": "seaborn/matrix.py", "line": 1251, "name": "ClusterGrid", "kind": "ref", "category": "function", "info": " plotter = ClusterGrid(data, pivot_kws=pivot_kws, figsize=figsize,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 8, "name": "palplot", "kind": "def", "category": "function", "info": "def palplot(pal, size=1):\n \"\"\"Plot the values in a color palette as a horizontal array.\n\n Parameters\n ----------\n pal : sequence of matplotlib colors\n colors, i.e. as returned by seaborn.color_palette()\n size :\n scaling factor for size of plot\n\n \"\"\"\n n = len(pal)\n f, ax = plt.subplots(1, 1, figsize=(n * size, size))\n ax.imshow(np.arange(n).reshape(1, n),\n cmap=mpl.colors.ListedColormap(list(pal)),\n interpolation=\"nearest\", aspect=\"auto\")\n ax.set_xticks(np.arange(n) - .5)\n ax.set_yticks([-.5, .5])\n # Ensure nice border between colors\n ax.set_xticklabels([\"\" for _ in range(n)])\n # The proper way to set no ticks\n ax.yaxis.set_major_locator(ticker.NullLocator())\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 22, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap=mpl.colors.ListedColormap(list(pal)),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 24, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax.set_xticks(np.arange(n) - .5)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 25, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax.set_yticks([-.5, .5])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 27, "name": "set_xticklabels", "kind": "ref", "category": "function", "info": " ax.set_xticklabels([\"\" for _ in range(n)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 29, "name": "set_major_locator", "kind": "ref", "category": "function", "info": " ax.yaxis.set_major_locator(ticker.NullLocator())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 32, "name": "dogplot", "kind": "def", "category": "function", "info": "def dogplot(*_, **__):\n \"\"\"Who's a good boy?\"\"\"\n try:\n from urllib.request import urlopen\n except ImportError:\n from urllib2 import urlopen\n from io import BytesIO\n\n url = \"https://github.com/mwaskom/seaborn-data/raw/master/png/img{}.png\"\n pic = np.random.randint(2, 7)\n data = BytesIO(urlopen(url.format(pic)).read())\n img = plt.imread(data)\n f, ax = plt.subplots(figsize=(5, 5), dpi=100)\n f.subplots_adjust(0, 0, 1, 1)\n ax.imshow(img)\n 
ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 41, "name": "randint", "kind": "ref", "category": "function", "info": " pic = np.random.randint(2, 7)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/miscplot.py", "rel_fname": "seaborn/miscplot.py", "line": 47, "name": "set_axis_off", "kind": "ref", "category": "function", "info": " ax.set_axis_off()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 60, "name": "_ColorPalette", "kind": "def", "category": "class", "info": "__enter__\t__exit__\tas_hex\t_repr_html_"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 62, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n \"\"\"Open the context.\"\"\"\n from .rcmod import set_palette\n self._orig_palette = color_palette()\n set_palette(self)\n return self\n\n def __exit__(self, *args):\n \"\"\"Close the context.\"\"\"\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 65, "name": "color_palette", "kind": "ref", "category": "function", "info": " self._orig_palette = color_palette()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 66, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 69, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, *args):\n \"\"\"Close the context.\"\"\"\n from .rcmod import set_palette\n set_palette(self._orig_palette)\n\n def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 72, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(self._orig_palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 74, "name": "as_hex", "kind": "def", "category": "function", "info": " def as_hex(self):\n \"\"\"Return a color palette with hex codes instead of RGB values.\"\"\"\n hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n return _ColorPalette(hex)\n\n def _repr_html_(self):\n \"\"\"Rich display of the 
color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 76, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " hex = [mpl.colors.rgb2hex(rgb) for rgb in self]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 77, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(hex)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 79, "name": "_repr_html_", "kind": "def", "category": "function", "info": " def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 84, "name": "as_hex", "kind": "ref", "category": "function", "info": " for i, c in enumerate(self.as_hex()):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 93, "name": "_patch_colormap_display", "kind": "def", "category": "function", "info": "def _patch_colormap_display():\n \"\"\"Simplify the rich display of matplotlib color maps in a notebook.\"\"\"\n def _repr_png_(self):\n \"\"\"Generate a PNG representation of the Colormap.\"\"\"\n import io\n from PIL import Image\n import numpy as np\n IMAGE_SIZE = (400, 50)\n X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))\n pixels = self(X, bytes=True)\n png_bytes = io.BytesIO()\n Image.fromarray(pixels).save(png_bytes, format='png')\n return png_bytes.getvalue()\n\n def _repr_html_(self):\n \"\"\"Generate an HTML representation of the Colormap.\"\"\"\n import base64\n png_bytes = self._repr_png_()\n png_base64 = base64.b64encode(png_bytes).decode('ascii')\n return ('')\n\n mpl.colors.Colormap._repr_png_ = _repr_png_\n mpl.colors.Colormap._repr_html_ = _repr_html_\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 95, "name": "_repr_png_", "kind": "def", "category": "function", "info": " def _repr_png_(self):\n \"\"\"Generate a PNG representation of the Colormap.\"\"\"\n import io\n from PIL import Image\n import numpy as np\n IMAGE_SIZE = (400, 50)\n X = np.tile(np.linspace(0, 1, IMAGE_SIZE[0]), (IMAGE_SIZE[1], 1))\n pixels = self(X, bytes=True)\n png_bytes = io.BytesIO()\n Image.fromarray(pixels).save(png_bytes, format='png')\n return png_bytes.getvalue()\n\n def _repr_html_(self):\n \"\"\"Generate an HTML representation of the Colormap.\"\"\"\n import base64\n png_bytes = self._repr_png_()\n png_base64 = base64.b64encode(png_bytes).decode('ascii')\n return ('')\n\n mpl.colors.Colormap._repr_png_ = _repr_png_\n mpl.colors.Colormap._repr_html_ = _repr_html_\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 102, "name": "self", "kind": "ref", "category": "function", "info": " pixels = self(X, bytes=True)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 105, "name": "getvalue", "kind": "ref", "category": "function", "info": " return png_bytes.getvalue()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 107, "name": "_repr_html_", "kind": "def", "category": "function", "info": " def _repr_html_(self):\n \"\"\"Rich display of the color palette in an HTML frontend.\"\"\"\n s = 55\n n = len(self)\n html = f''\n for i, c in enumerate(self.as_hex()):\n html += (\n f''\n )\n html += ''\n return html\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 110, "name": "_repr_png_", "kind": "ref", "category": "function", "info": " png_bytes = self._repr_png_()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 121, "name": "color_palette", "kind": "def", "category": "function", "info": "def color_palette(palette=None, n_colors=None, desat=None, as_cmap=False):\n \"\"\"Return a list of colors or continuous colormap defining a palette.\n\n Possible ``palette`` values include:\n - Name of a seaborn palette (deep, muted, bright, pastel, dark, colorblind)\n - Name of matplotlib colormap\n - 'husl' or 'hls'\n - 'ch:'\n - 'light:', 'dark:', 'blend:,',\n - A sequence of colors in any format matplotlib accepts\n\n Calling this function with ``palette=None`` will return the current\n matplotlib color cycle.\n\n This function can also be used in a ``with`` statement to temporarily\n set the color cycle for a plot or set of plots.\n\n See the :ref:`tutorial ` for more information.\n\n Parameters\n ----------\n palette : None, string, or sequence, optional\n Name of palette or None to return current palette. If a sequence, input\n colors are used but possibly cycled and desaturated.\n n_colors : int, optional\n Number of colors in the palette. If ``None``, the default will depend\n on how ``palette`` is specified. Named palettes default to 6 colors,\n but grabbing the current palette or passing in a list of colors will\n not change the number of colors unless this is specified. Asking for\n more colors than exist in the palette will cause it to cycle. Ignored\n when ``as_cmap`` is True.\n desat : float, optional\n Proportion to desaturate each color by.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n set_palette : Set the default color cycle for all plots.\n set_color_codes : Reassign color codes like ``\"b\"``, ``\"g\"``, etc. to\n colors from one of the seaborn palettes.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/color_palette.rst\n\n    \"\"\"\n if palette is None:\n palette = get_color_cycle()\n if n_colors is None:\n n_colors = len(palette)\n\n elif not isinstance(palette, str):\n palette = palette\n if n_colors is None:\n n_colors = len(palette)\n else:\n\n if n_colors is None:\n # Use all colors in a qualitative palette or 6 of another kind\n n_colors = QUAL_PALETTE_SIZES.get(palette, 6)\n\n if palette in SEABORN_PALETTES:\n # Named \"seaborn variant\" of matplotlib default color cycle\n palette = SEABORN_PALETTES[palette]\n\n elif palette == \"hls\":\n # Evenly spaced colors in cylindrical RGB space\n palette = hls_palette(n_colors, as_cmap=as_cmap)\n\n elif palette == \"husl\":\n # Evenly spaced colors in cylindrical Lab space\n palette = husl_palette(n_colors, as_cmap=as_cmap)\n\n elif palette.lower() == \"jet\":\n # Paternalism\n raise ValueError(\"No.\")\n\n elif palette.startswith(\"ch:\"):\n # Cubehelix palette with params specified in string\n args, kwargs = _parse_cubehelix_args(palette)\n palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n\n elif palette.startswith(\"light:\"):\n # light palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"dark:\"):\n # dark palette to color specified in string\n _, color = palette.split(\":\")\n reverse = color.endswith(\"_r\")\n if reverse:\n color = color[:-2]\n palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n\n elif palette.startswith(\"blend:\"):\n # blend palette between colors specified in string\n _, colors = palette.split(\":\")\n colors = colors.split(\",\")\n palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n\n else:\n try:\n # Perhaps a named matplotlib colormap?\n palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n except (ValueError, KeyError): # Error class changed in mpl36\n raise ValueError(f\"{palette!r} is not a valid palette name\")\n\n if desat is not None:\n palette = [desaturate(c, desat) for c in palette]\n\n if not as_cmap:\n\n # Always return as many colors as we asked for\n pal_cycle = cycle(palette)\n palette = [next(pal_cycle) for _ in range(n_colors)]\n\n # Always return in r, g, b tuple format\n try:\n palette = map(mpl.colors.colorConverter.to_rgb, palette)\n palette = _ColorPalette(palette)\n except ValueError:\n raise ValueError(f\"Could not generate a palette for {palette}\")\n\n return palette\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 174, "name": "get_color_cycle", "kind": "ref", "category": "function", "info": " palette = get_color_cycle()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 194, "name": "hls_palette", "kind": "ref", "category": "function", "info": " palette = hls_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 198, "name": "husl_palette", "kind": "ref", "category": "function", "info": " palette = husl_palette(n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 206, "name": "_parse_cubehelix_args", 
"kind": "ref", "category": "function", "info": " args, kwargs = _parse_cubehelix_args(palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 207, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " palette = cubehelix_palette(n_colors, *args, **kwargs, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 215, "name": "light_palette", "kind": "ref", "category": "function", "info": " palette = light_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 223, "name": "dark_palette", "kind": "ref", "category": "function", "info": " palette = dark_palette(color, n_colors, reverse=reverse, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 229, "name": "blend_palette", "kind": "ref", "category": "function", "info": " palette = blend_palette(colors, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 234, "name": "mpl_palette", "kind": "ref", "category": "function", "info": " palette = mpl_palette(palette, n_colors, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 239, "name": "desaturate", "kind": "ref", "category": "function", "info": " palette = [desaturate(c, desat) for c in palette]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 250, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " palette = _ColorPalette(palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 257, "name": "hls_palette", "kind": "def", "category": "function", "info": "def hls_palette(n_colors=6, h=.01, l=.6, s=.65, as_cmap=False): # noqa\n \"\"\"\n Return hues with constant lightness and saturation in the HLS system.\n\n The hues are evenly sampled along a circular path. The resulting palette will be\n appropriate for categorical or cyclical data.\n\n The `h`, `l`, and `s` values should be between 0 and 1.\n\n .. note::\n While the separation of the resulting colors will be mathematically\n constant, the HLS system does not construct a perceptually-uniform space,\n so their apparent intensity will vary.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n h : float\n The value of the first hue.\n l : float\n The lightness value.\n s : float\n The saturation intensity.\n as_cmap : bool\n If True, return a matplotlib colormap object.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n husl_palette : Make a palette using evenly spaced hues in the HUSL system.\n\n Examples\n --------\n .. 
include:: ../docstrings/hls_palette.rst\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues -= hues.astype(int)\n palette = [colorsys.hls_to_rgb(h_i, l, s) for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hls\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 303, "name": "astype", "kind": "ref", "category": "function", "info": " hues -= hues.astype(int)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 306, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hls\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 308, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 311, "name": "husl_palette", "kind": "def", "category": "function", "info": "def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa\n \"\"\"\n Return hues with constant lightness and saturation in the HUSL system.\n\n The hues are evenly sampled along a circular path. The resulting palette will be\n appropriate for categorical or cyclical data.\n\n The `h`, `l`, and `s` values should be between 0 and 1.\n\n This function is similar to :func:`hls_palette`, but it uses a nonlinear color\n space that is more perceptually uniform.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n h : float\n The value of the first hue.\n l : float\n The lightness value.\n s : float\n The saturation intensity.\n as_cmap : bool\n If True, return a matplotlib colormap object.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n hls_palette : Make a palette using evenly spaced hues in the HSL system.\n\n Examples\n --------\n .. 
include:: ../docstrings/husl_palette.rst\n\n \"\"\"\n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues *= 359\n s *= 99\n l *= 99 # noqa\n palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hsl\")\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 358, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 360, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " return mpl.colors.ListedColormap(palette, \"hsl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 362, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 365, "name": "mpl_palette", "kind": "def", "category": "function", "info": "def mpl_palette(name, n_colors=6, as_cmap=False):\n \"\"\"\n Return a palette or colormap from the matplotlib registry.\n\n For continuous palettes, evenly-spaced discrete samples are chosen while\n excluding the minimum and maximum value in the colormap to provide better\n contrast at the extremes.\n\n For qualitative palettes (e.g. those from colorbrewer), exact values are\n indexed (rather than interpolated), but fewer than `n_colors` can be returned\n if the palette does not define that many.\n\n Parameters\n ----------\n name : string\n Name of the palette. This should be a named matplotlib colormap.\n n_colors : int\n Number of discrete colors in the palette.\n\n Returns\n -------\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n Examples\n --------\n .. 
include:: ../docstrings/mpl_palette.rst\n\n \"\"\"\n if name.endswith(\"_d\"):\n sub_name = name[:-2]\n if sub_name.endswith(\"_r\"):\n reverse = True\n sub_name = sub_name[:-2]\n else:\n reverse = False\n pal = color_palette(sub_name, 2) + [\"#333333\"]\n if reverse:\n pal = pal[::-1]\n cmap = blend_palette(pal, n_colors, as_cmap=True)\n else:\n cmap = get_colormap(name)\n\n if name in MPL_QUAL_PALS:\n bins = np.linspace(0, 1, MPL_QUAL_PALS[name])[:n_colors]\n else:\n bins = np.linspace(0, 1, int(n_colors) + 2)[1:-1]\n palette = list(map(tuple, cmap(bins)[:, :3]))\n\n if as_cmap:\n return cmap\n else:\n return _ColorPalette(palette)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 400, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal = color_palette(sub_name, 2) + [\"#333333\"]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 403, "name": "blend_palette", "kind": "ref", "category": "function", "info": " cmap = blend_palette(pal, n_colors, as_cmap=True)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 405, "name": "get_colormap", "kind": "ref", "category": "function", "info": " cmap = get_colormap(name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 411, "name": "cmap", "kind": "ref", "category": "function", "info": " palette = list(map(tuple, cmap(bins)[:, :3]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 416, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 419, "name": "_color_to_rgb", "kind": "def", "category": "function", "info": "def _color_to_rgb(color, input):\n \"\"\"Add some more flexibility to color choices.\"\"\"\n if input == \"hls\":\n color = colorsys.hls_to_rgb(*color)\n elif input == \"husl\":\n color = husl.husl_to_rgb(*color)\n color = tuple(np.clip(color, 0, 1))\n elif input == \"xkcd\":\n color = xkcd_rgb[color]\n\n return mpl.colors.to_rgb(color)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 424, "name": "husl_to_rgb", "kind": "ref", "category": "function", "info": " color = husl.husl_to_rgb(*color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 429, "name": "to_rgb", "kind": "ref", "category": "function", "info": " return mpl.colors.to_rgb(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 432, "name": "dark_palette", "kind": "def", "category": "function", "info": "def dark_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from dark to ``color``.\n\n This kind of palette is good for data that range between relatively\n uninteresting low values and interesting high values.\n\n The ``color`` parameter can be specified in a number of ways, including\n all 
options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_dark_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex, rgb-tuple, or html color name\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n input : {'rgb', 'hls', 'husl', 'xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n .. include:: ../docstrings/dark_palette.rst\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 15\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 475, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 476, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 478, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 480, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 483, "name": "light_palette", "kind": "def", "category": "function", "info": "def light_palette(color, n_colors=6, reverse=False, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a sequential palette that blends from light to ``color``.\n\n The ``color`` parameter can be specified in a number of ways, including\n all options for defining a color in matplotlib and several additional\n color spaces that are handled by seaborn. 
You can also use the database\n of named colors from the XKCD color survey.\n\n If you are using a Jupyter notebook, you can also choose this palette\n interactively with the :func:`choose_light_palette` function.\n\n Parameters\n ----------\n color : base color for high values\n hex code, html color name, or tuple in `input` space.\n n_colors : int, optional\n number of colors in the palette\n reverse : bool, optional\n if True, reverse the direction of the blend\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n input : {'rgb', 'hls', 'husl', 'xkcd'}\n Color space to interpret the input color. The first three options\n apply to tuple inputs and the latter applies to string inputs.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n diverging_palette : Create a diverging palette with two colors.\n\n Examples\n --------\n .. include:: ../docstrings/light_palette.rst\n\n \"\"\"\n rgb = _color_to_rgb(color, input)\n h, s, l = husl.rgb_to_husl(*rgb)\n gray_s, gray_l = .15 * s, 95\n gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n colors = [rgb, gray] if reverse else [gray, rgb]\n return blend_palette(colors, n_colors, as_cmap)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 523, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " rgb = _color_to_rgb(color, input)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 524, "name": "rgb_to_husl", "kind": "ref", "category": "function", "info": " h, s, l = husl.rgb_to_husl(*rgb)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 526, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " gray = _color_to_rgb((h, gray_s, gray_l), input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 528, "name": "blend_palette", "kind": "ref", "category": "function", "info": " return blend_palette(colors, n_colors, as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 531, "name": "diverging_palette", "kind": "def", "category": "function", "info": "def diverging_palette(h_neg, h_pos, s=75, l=50, sep=1, n=6, # noqa\n center=\"light\", as_cmap=False):\n \"\"\"Make a diverging palette between two HUSL colors.\n\n If you are using the IPython notebook, you can also choose this palette\n interactively with the :func:`choose_diverging_palette` function.\n\n Parameters\n ----------\n h_neg, h_pos : float in [0, 359]\n Anchor hues for negative and positive extents of the map.\n s : float in [0, 100], optional\n Anchor saturation for both extents of the map.\n l : float in [0, 100], optional\n Anchor lightness for both extents of the map.\n sep : int, optional\n Size of the intermediate region.\n n : int, optional\n Number of colors in the palette (if not returning a cmap)\n center : {\"light\", \"dark\"}, optional\n Whether the center of the palette is light or dark\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n palette\n list of 
RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark values.\n light_palette : Create a sequential palette with light values.\n\n Examples\n --------\n .. include:: ../docstrings/diverging_palette.rst\n\n \"\"\"\n palfunc = dict(dark=dark_palette, light=light_palette)[center]\n n_half = int(128 - (sep // 2))\n neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n midpoint = dict(light=[(.95, .95, .95)], dark=[(.133, .133, .133)])[center]\n mid = midpoint * sep\n pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 572, "name": "palfunc", "kind": "ref", "category": "function", "info": " neg = palfunc((h_neg, s, l), n_half, reverse=True, input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 573, "name": "palfunc", "kind": "ref", "category": "function", "info": " pos = palfunc((h_pos, s, l), n_half, input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 576, "name": "blend_palette", "kind": "ref", "category": "function", "info": " pal = blend_palette(np.concatenate([neg, mid, pos]), n, as_cmap=as_cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 580, "name": "blend_palette", "kind": "def", "category": "function", "info": "def blend_palette(colors, n_colors=6, as_cmap=False, input=\"rgb\"):\n \"\"\"Make a palette that blends between a list of colors.\n\n Parameters\n ----------\n colors : sequence of colors in various formats interpreted by `input`\n hex code, html color name, or tuple in `input` space.\n n_colors : int, optional\n Number of colors in the palette.\n as_cmap : bool, optional\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n Examples\n --------\n .. 
include:: ../docstrings/blend_palette.rst\n\n    \"\"\"\n colors = [_color_to_rgb(color, input) for color in colors]\n name = \"blend\"\n pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n if not as_cmap:\n rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n pal = _ColorPalette(map(tuple, rgb_array))\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 602, "name": "_color_to_rgb", "kind": "ref", "category": "function", "info": " colors = [_color_to_rgb(color, input) for color in colors]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 604, "name": "from_list", "kind": "ref", "category": "function", "info": " pal = mpl.colors.LinearSegmentedColormap.from_list(name, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 606, "name": "pal", "kind": "ref", "category": "function", "info": " rgb_array = pal(np.linspace(0, 1, int(n_colors)))[:, :3] # no alpha\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 607, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " pal = _ColorPalette(map(tuple, rgb_array))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 611, "name": "xkcd_palette", "kind": "def", "category": "function", "info": "def xkcd_palette(colors):\n \"\"\"Make a palette with color names from the xkcd color survey.\n\n See xkcd for the full list of colors: https://xkcd.com/color/rgb/\n\n This is just a simple wrapper around the `seaborn.xkcd_rgb` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the `seaborn.xkcd_rgb` dictionary.\n\n Returns\n -------\n palette\n A list of colors as RGB tuples.\n\n See Also\n --------\n crayon_palette : Make a palette with Crayola crayon colors.\n\n \"\"\"\n palette = [xkcd_rgb[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 634, "name": "color_palette", "kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 637, "name": "crayon_palette", "kind": "def", "category": "function", "info": "def crayon_palette(colors):\n \"\"\"Make a palette with color names from Crayola crayons.\n\n Colors are taken from here:\n https://en.wikipedia.org/wiki/List_of_Crayola_crayon_colors\n\n This is just a simple wrapper around the `seaborn.crayons` dictionary.\n\n Parameters\n ----------\n colors : list of strings\n List of keys in the `seaborn.crayons` dictionary.\n\n Returns\n -------\n palette\n A list of colors as RGB tuples.\n\n See Also\n --------\n xkcd_palette : Make a palette with named colors from the XKCD color survey.\n\n \"\"\"\n palette = [crayons[name] for name in colors]\n return color_palette(palette, len(palette))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 661, "name": "color_palette", 
"kind": "ref", "category": "function", "info": " return color_palette(palette, len(palette))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 664, "name": "cubehelix_palette", "kind": "def", "category": "function", "info": "def cubehelix_palette(n_colors=6, start=0, rot=.4, gamma=1.0, hue=0.8,\n light=.85, dark=.15, reverse=False, as_cmap=False):\n \"\"\"Make a sequential palette from the cubehelix system.\n\n This produces a colormap with linearly-decreasing (or increasing)\n brightness. That means that information will be preserved if printed to\n black and white or viewed by someone who is colorblind. \"cubehelix\" is\n also available as a matplotlib-based palette, but this function gives the\n user more control over the look of the palette and has a different set of\n defaults.\n\n In addition to using this function, it is also possible to generate a\n cubehelix palette generally in seaborn using a string starting with\n `ch:` and containing other parameters (e.g. `\"ch:s=.25,r=-.5\"`).\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n start : float, 0 <= start <= 3\n The hue value at the start of the helix.\n rot : float\n Rotations around the hue wheel over the range of the palette.\n gamma : float 0 <= gamma\n Nonlinearity to emphasize dark (gamma < 1) or light (gamma > 1) colors.\n hue : float, 0 <= hue <= 1\n Saturation of the colors.\n dark : float 0 <= dark <= 1\n Intensity of the darkest color in the palette.\n light : float 0 <= light <= 1\n Intensity of the lightest color in the palette.\n reverse : bool\n If True, the palette will go from dark to light.\n as_cmap : bool\n If True, return a :class:`matplotlib.colors.ListedColormap`.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n choose_cubehelix_palette : Launch an interactive widget to select cubehelix\n palette parameters.\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n\n References\n ----------\n Green, D. A. (2011). \"A colour scheme for the display of astronomical\n intensity images\". Bulletin of the Astromical Society of India, Vol. 39,\n p. 289-295.\n\n Examples\n --------\n .. 
include:: ../docstrings/cubehelix_palette.rst\n\n \"\"\"\n def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 723, "name": "get_color_function", "kind": "def", "category": "function", "info": " def get_color_function(p0, p1):\n # Copied from matplotlib because it lives in private module\n def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return _ColorPalette(pal)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 725, "name": "color", "kind": "def", "category": "function", "info": " def color(x):\n # Apply gamma factor to emphasise low or high intensity values\n xg = x ** gamma\n\n # Calculate amplitude and angle of deviation from the black\n # to white diagonal in the plane of constant\n # perceived intensity.\n a = hue * xg * (1 - xg) / 2\n\n phi = 2 * np.pi * (start / 3 + rot * x)\n\n return xg + a * (p0 * np.cos(phi) + p1 * np.sin(phi))\n return color\n\n cdict = {\n \"red\": get_color_function(-0.14861, 1.78277),\n \"green\": get_color_function(-0.29227, -0.90649),\n \"blue\": get_color_function(1.97294, 0.0),\n }\n\n cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n\n x = np.linspace(light, dark, int(n_colors))\n pal = cmap(x)[:, :3].tolist()\n if reverse:\n pal = pal[::-1]\n\n if as_cmap:\n x_256 = np.linspace(light, dark, 256)\n if reverse:\n x_256 = x_256[::-1]\n pal_256 = cmap(x_256)\n cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n return cmap\n else:\n return 
_ColorPalette(pal)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 740, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"red\": get_color_function(-0.14861, 1.78277),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 741, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"green\": get_color_function(-0.29227, -0.90649),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 742, "name": "get_color_function", "kind": "ref", "category": "function", "info": " \"blue\": get_color_function(1.97294, 0.0),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 745, "name": "LinearSegmentedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.LinearSegmentedColormap(\"cubehelix\", cdict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 748, "name": "cmap", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 748, "name": "tolist", "kind": "ref", "category": "function", "info": " pal = cmap(x)[:, :3].tolist()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 756, "name": "cmap", "kind": "ref", "category": "function", "info": " pal_256 = cmap(x_256)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 757, "name": "ListedColormap", "kind": "ref", "category": "function", "info": " cmap = mpl.colors.ListedColormap(pal_256, \"seaborn_cubehelix\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 760, "name": "_ColorPalette", "kind": "ref", "category": "function", "info": " return _ColorPalette(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 763, "name": "_parse_cubehelix_args", "kind": "def", "category": "function", "info": "def _parse_cubehelix_args(argstr):\n \"\"\"Turn stringified cubehelix params into args/kwargs.\"\"\"\n\n if argstr.startswith(\"ch:\"):\n argstr = argstr[3:]\n\n if argstr.endswith(\"_r\"):\n reverse = True\n argstr = argstr[:-2]\n else:\n reverse = False\n\n if not argstr:\n return [], {\"reverse\": reverse}\n\n all_args = argstr.split(\",\")\n\n args = [float(a.strip(\" \")) for a in all_args if \"=\" not in a]\n\n kwargs = [a.split(\"=\") for a in all_args if \"=\" in a]\n kwargs = {k.strip(\" \"): float(v.strip(\" \")) for k, v in kwargs}\n\n kwarg_map = dict(\n s=\"start\", r=\"rot\", g=\"gamma\",\n h=\"hue\", l=\"light\", d=\"dark\", # noqa: E741\n )\n\n kwargs = {kwarg_map.get(k, k): v for k, v in kwargs.items()}\n\n if reverse:\n kwargs[\"reverse\"] = True\n\n return args, kwargs\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 798, "name": "set_color_codes", 
"kind": "def", "category": "function", "info": "def set_color_codes(palette=\"deep\"):\n \"\"\"Change how matplotlib color shorthands are interpreted.\n\n Calling this will change how shorthand codes like \"b\" or \"g\"\n are interpreted by matplotlib in subsequent plots.\n\n Parameters\n ----------\n palette : {deep, muted, pastel, dark, bright, colorblind}\n Named seaborn palette to use as the source of colors.\n\n See Also\n --------\n set : Color codes can be set through the high-level seaborn style\n manager.\n set_palette : Color codes can also be set through the function that\n sets the matplotlib color cycle.\n\n \"\"\"\n if palette == \"reset\":\n colors = [\n (0., 0., 1.),\n (0., .5, 0.),\n (1., 0., 0.),\n (.75, 0., .75),\n (.75, .75, 0.),\n (0., .75, .75),\n (0., 0., 0.)\n ]\n elif not isinstance(palette, str):\n err = \"set_color_codes requires a named seaborn palette\"\n raise TypeError(err)\n elif palette in SEABORN_PALETTES:\n if not palette.endswith(\"6\"):\n palette = palette + \"6\"\n colors = SEABORN_PALETTES[palette] + [(.1, .1, .1)]\n else:\n err = f\"Cannot set colors with palette '{palette}'\"\n raise ValueError(err)\n\n for code, color in zip(\"bgrmyck\", colors):\n rgb = mpl.colors.colorConverter.to_rgb(color)\n mpl.colors.colorConverter.colors[code] = rgb\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/palettes.py", "rel_fname": "seaborn/palettes.py", "line": 839, "name": "to_rgb", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgb(color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 81, "name": "set_theme", "kind": "def", "category": "function", "info": "def set_theme(context=\"notebook\", style=\"darkgrid\", palette=\"deep\",\n font=\"sans-serif\", font_scale=1, color_codes=True, rc=None):\n \"\"\"\n Set aspects of the visual theme for all matplotlib and seaborn plots.\n\n This function changes the global defaults for all plots using the\n matplotlib rcParams system. The themeing is decomposed into several distinct\n sets of parameter values.\n\n The options are illustrated in the :doc:`aesthetics <../tutorial/aesthetics>`\n and :doc:`color palette <../tutorial/color_palettes>` tutorials.\n\n Parameters\n ----------\n context : string or dict\n Scaling parameters, see :func:`plotting_context`.\n style : string or dict\n Axes style parameters, see :func:`axes_style`.\n palette : string or sequence\n Color palette, see :func:`color_palette`.\n font : string\n Font family, see matplotlib font manager.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n rc : dict or None\n Dictionary of rc parameter mappings to override the above.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/set_theme.rst\n\n \"\"\"\n set_context(context, font_scale)\n set_style(style, rc={\"font.family\": font})\n set_palette(palette, color_codes=color_codes)\n if rc is not None:\n mpl.rcParams.update(rc)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 118, "name": "set_context", "kind": "ref", "category": "function", "info": " set_context(context, font_scale)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 119, "name": "set_style", "kind": "ref", "category": "function", "info": " set_style(style, rc={\"font.family\": font})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 120, "name": "set_palette", "kind": "ref", "category": "function", "info": " set_palette(palette, color_codes=color_codes)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 131, "name": "set_theme", "kind": "ref", "category": "function", "info": " set_theme(*args, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 134, "name": "reset_defaults", "kind": "def", "category": "function", "info": "def reset_defaults():\n \"\"\"Restore all RC params to default settings.\"\"\"\n mpl.rcParams.update(mpl.rcParamsDefault)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 139, "name": "reset_orig", "kind": "def", "category": "function", "info": "def reset_orig():\n \"\"\"Restore all RC params to original settings (respects custom rc).\"\"\"\n from . import _orig_rc_params\n mpl.rcParams.update(_orig_rc_params)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 145, "name": "axes_style", "kind": "def", "category": "function", "info": "def axes_style(style=None, rc=None):\n \"\"\"\n Get the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_style`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n style : None, dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/axes_style.rst\n\n \"\"\"\n if style is None:\n style_dict = {k: mpl.rcParams[k] for k in _style_keys}\n\n elif isinstance(style, dict):\n style_dict = style\n\n else:\n styles = [\"white\", \"dark\", \"whitegrid\", \"darkgrid\", \"ticks\"]\n if style not in styles:\n raise ValueError(f\"style must be one of {', '.join(styles)}\")\n\n # Define colors here\n dark_gray = \".15\"\n light_gray = \".8\"\n\n # Common parameters\n style_dict = {\n\n \"figure.facecolor\": \"white\",\n \"axes.labelcolor\": dark_gray,\n\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": dark_gray,\n \"ytick.color\": dark_gray,\n\n \"axes.axisbelow\": True,\n \"grid.linestyle\": \"-\",\n\n\n \"text.color\": dark_gray,\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\"Arial\", \"DejaVu Sans\", \"Liberation Sans\",\n \"Bitstream Vera Sans\", \"sans-serif\"],\n\n\n \"lines.solid_capstyle\": \"round\",\n \"patch.edgecolor\": \"w\",\n \"patch.force_edgecolor\": True,\n\n \"image.cmap\": \"rocket\",\n\n \"xtick.top\": False,\n \"ytick.right\": False,\n\n }\n\n # Set grid on or off\n if \"grid\" in style:\n style_dict.update({\n \"axes.grid\": True,\n })\n else:\n style_dict.update({\n \"axes.grid\": False,\n })\n\n # Set the color of the background, spines, and grids\n if style.startswith(\"dark\"):\n style_dict.update({\n\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"grid.color\": \"white\",\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style == \"whitegrid\":\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": light_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n elif style in [\"white\", \"ticks\"]:\n style_dict.update({\n\n \"axes.facecolor\": \"white\",\n \"axes.edgecolor\": dark_gray,\n \"grid.color\": light_gray,\n\n \"axes.spines.left\": True,\n \"axes.spines.bottom\": True,\n \"axes.spines.right\": True,\n \"axes.spines.top\": True,\n\n })\n\n # Show or hide the axes ticks\n if style == \"ticks\":\n style_dict.update({\n \"xtick.bottom\": True,\n \"ytick.left\": True,\n })\n else:\n style_dict.update({\n \"xtick.bottom\": False,\n \"ytick.left\": False,\n })\n\n # Remove entries that are not defined in the base list of valid keys\n # This lets us handle matplotlib <=/> 2.0\n style_dict = {k: v for k, v in style_dict.items() if k in _style_keys}\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _style_keys}\n style_dict.update(rc)\n\n # Wrap in an _AxesStyle object so this can be used in a with statement\n style_object = _AxesStyle(style_dict)\n\n return style_object\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 297, "name": "_AxesStyle", "kind": "ref", "category": "function", "info": " style_object = _AxesStyle(style_dict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 302, "name": "set_style", "kind": "def", "category": "function", "info": "def set_style(style=None, rc=None):\n \"\"\"\n Set the parameters that control the general style of the plots.\n\n The style parameters control properties like the color of the background and\n 
whether a grid is enabled by default. This is accomplished using the\n matplotlib rcParams system.\n\n The options are illustrated in the\n :doc:`aesthetics tutorial <../tutorial/aesthetics>`.\n\n See :func:`axes_style` to get the parameter values.\n\n Parameters\n ----------\n style : dict, or one of {darkgrid, whitegrid, dark, white, ticks}\n A dictionary of parameters or the name of a preconfigured style.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n style dictionaries. This only updates parameters that are\n considered part of the style definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/set_style.rst\n\n \"\"\"\n style_object = axes_style(style, rc)\n mpl.rcParams.update(style_object)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 330, "name": "axes_style", "kind": "ref", "category": "function", "info": " style_object = axes_style(style, rc)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 334, "name": "plotting_context", "kind": "def", "category": "function", "info": "def plotting_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Get the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_context`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n context : None, dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/plotting_context.rst\n\n \"\"\"\n if context is None:\n context_dict = {k: mpl.rcParams[k] for k in _context_keys}\n\n elif isinstance(context, dict):\n context_dict = context\n\n else:\n\n contexts = [\"paper\", \"notebook\", \"talk\", \"poster\"]\n if context not in contexts:\n raise ValueError(f\"context must be in {', '.join(contexts)}\")\n\n # Set up dictionary of default parameters\n texts_base_context = {\n\n \"font.size\": 12,\n \"axes.labelsize\": 12,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 11,\n \"ytick.labelsize\": 11,\n \"legend.fontsize\": 11,\n \"legend.title_fontsize\": 12,\n\n }\n\n base_context = {\n\n \"axes.linewidth\": 1.25,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.5,\n \"lines.markersize\": 6,\n \"patch.linewidth\": 1,\n\n \"xtick.major.width\": 1.25,\n \"ytick.major.width\": 1.25,\n \"xtick.minor.width\": 1,\n \"ytick.minor.width\": 1,\n\n \"xtick.major.size\": 6,\n \"ytick.major.size\": 6,\n \"xtick.minor.size\": 4,\n \"ytick.minor.size\": 4,\n\n }\n base_context.update(texts_base_context)\n\n # Scale all the parameters by the same factor depending on the context\n scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]\n context_dict = {k: v * scaling for k, v in base_context.items()}\n\n # Now independently scale the fonts\n font_keys = texts_base_context.keys()\n font_dict = {k: context_dict[k] * font_scale for k in font_keys}\n context_dict.update(font_dict)\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _context_keys}\n context_dict.update(rc)\n\n # Wrap in a _PlottingContext object so this can be used in a with statement\n context_object = _PlottingContext(context_dict)\n\n return context_object\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 430, "name": "_PlottingContext", "kind": "ref", "category": "function", "info": " context_object = _PlottingContext(context_dict)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 435, "name": "set_context", "kind": "def", "category": "function", "info": "def set_context(context=None, font_scale=1, rc=None):\n \"\"\"\n Set the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n matplotlib rcParams system.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are versions of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n See :func:`plotting_context` to get the parameter values.\n\n Parameters\n ----------\n context : dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/set_context.rst\n\n \"\"\"\n context_object = plotting_context(context, font_scale, rc)\n mpl.rcParams.update(context_object)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 468, "name": "plotting_context", "kind": "ref", "category": "function", "info": " context_object = plotting_context(context, font_scale, rc)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 472, "name": "_RCAesthetics", "kind": "def", "category": "class", "info": "__enter__\t__exit__\t__call__"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 473, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self):\n rc = mpl.rcParams\n self._orig = {k: rc[k] for k in self._keys}\n self._set(self)\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 476, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 478, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(self, exc_type, exc_value, exc_tb):\n self._set(self._orig)\n\n def __call__(self, func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 479, "name": "_set", "kind": "ref", "category": "function", "info": " self._set(self._orig)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 483, "name": "wrapper", "kind": "def", "category": "function", "info": " def wrapper(*args, **kwargs):\n with self:\n return func(*args, **kwargs)\n return wrapper\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 485, "name": "func", "kind": "ref", "category": "function", "info": " return func(*args, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 489, "name": "_AxesStyle", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 495, "name": "_PlottingContext", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 501, "name": "set_palette", "kind": "def", "category": "function", "info": "def set_palette(palette, n_colors=None, desat=None, color_codes=False):\n \"\"\"Set the matplotlib color cycle using a seaborn palette.\n\n Parameters\n ----------\n palette : seaborn color palette | matplotlib colormap | hls | husl\n Palette definition. 
Should be something :func:`color_palette` can process.\n n_colors : int\n Number of colors in the cycle. The default number of colors will depend\n on the format of ``palette``, see the :func:`color_palette`\n documentation for more information.\n desat : float\n Proportion to desaturate each color by.\n color_codes : bool\n If ``True`` and ``palette`` is a seaborn palette, remap the shorthand\n color codes (e.g. \"b\", \"g\", \"r\", etc.) to the colors from this palette.\n\n See Also\n --------\n color_palette : build a color palette or set the color cycle temporarily\n in a ``with`` statement.\n set_context : set parameters to scale plot elements\n set_style : set the default parameters for figure style\n\n \"\"\"\n colors = palettes.color_palette(palette, n_colors, desat)\n cyl = cycler('color', colors)\n mpl.rcParams['axes.prop_cycle'] = cyl\n if color_codes:\n try:\n palettes.set_color_codes(palette)\n except (ValueError, TypeError):\n pass\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 526, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = palettes.color_palette(palette, n_colors, desat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/rcmod.py", "rel_fname": "seaborn/rcmod.py", "line": 531, "name": "set_color_codes", "kind": "ref", "category": "function", "info": " palettes.set_color_codes(palette)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 24, "name": "_LinearPlotter", "kind": "def", "category": "class", "info": "establish_variables\tdropna\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 31, "name": "establish_variables", "kind": "def", "category": "function", "info": " def establish_variables(self, data, **kws):\n \"\"\"Extract variables from data or use directly.\"\"\"\n self.data = data\n\n # Validate the inputs\n any_strings = any([isinstance(v, str) for v in kws.values()])\n if any_strings and data is None:\n raise ValueError(\"Must pass `data` if using named variables.\")\n\n # Set the variables\n for var, val in kws.items():\n if isinstance(val, str):\n vector = data[val]\n elif isinstance(val, list):\n vector = np.asarray(val)\n else:\n vector = val\n if vector is not None and vector.shape != (1,):\n vector = np.squeeze(vector)\n if np.ndim(vector) > 1:\n err = \"regplot inputs must be 1d\"\n raise ValueError(err)\n setattr(self, var, vector)\n\n def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 55, "name": "dropna", "kind": "def", "category": "function", "info": " def dropna(self, *vars):\n \"\"\"Remove observations with missing data.\"\"\"\n vals = [getattr(self, var) for var in vars]\n vals = [v for v in vals if v is not None]\n not_na = np.all(np.column_stack([pd.notnull(v) for v in vals]), axis=1)\n for var in vars:\n val = getattr(self, var)\n if 
val is not None:\n setattr(self, var, val[not_na])\n\n def plot(self, ax):\n raise NotImplementedError\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 69, "name": "_RegressionPlotter", "kind": "def", "category": "class", "info": "__init__\tscatter_data\testimate_data\tfit_regression\tfit_fast\tfit_poly\tfit_statsmodels\tfit_lowess\tfit_logx\tbin_predictor\tregress_out\tplot\tscatterplot\tlineplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 106, "name": "establish_variables", "kind": "ref", "category": "function", "info": " self.establish_variables(data, x=x, y=y, units=units,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 111, "name": "dropna", "kind": "ref", "category": "function", "info": " self.dropna(\"x\", \"y\", \"units\", \"x_partial\", \"y_partial\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 115, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.x = self.regress_out(self.x, self.x_partial)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 117, "name": "regress_out", "kind": "ref", "category": "function", "info": " self.y = self.regress_out(self.y, self.y_partial)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 122, "name": "bin_predictor", "kind": "ref", "category": "function", "info": " x_discrete, x_bins = self.bin_predictor(x_bins)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 136, "name": "scatter_data", "kind": "def", "category": "function", "info": " def scatter_data(self):\n \"\"\"Data where each observation is a point.\"\"\"\n x_j = self.x_jitter\n if x_j is None:\n x = self.x\n else:\n x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n\n y_j = self.y_jitter\n if y_j is None:\n y = self.y\n else:\n y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n\n return x, y\n\n @property\n def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci 
= self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.tools.sm_exceptions as sme\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n err_classes = (sme.PerfectSeparationError,)\n try:\n with warnings.catch_warnings():\n if hasattr(sme, \"PerfectSeparationWarning\"):\n # statsmodels>=0.14.0\n warnings.simplefilter(\"error\", sme.PerfectSeparationWarning)\n err_classes = (*err_classes, sme.PerfectSeparationWarning)\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except err_classes:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = 
np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 142, "name": "uniform", "kind": 
"ref", "category": "function", "info": " x = self.x + np.random.uniform(-x_j, x_j, len(self.x))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 148, "name": "uniform", "kind": "ref", "category": "function", "info": " y = self.y + np.random.uniform(-y_j, y_j, len(self.y))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 153, "name": "estimate_data", "kind": "def", "category": "function", "info": " def estimate_data(self):\n \"\"\"Data with a point estimate and CI for each discrete x value.\"\"\"\n x, y = self.x_discrete, self.y\n vals = sorted(np.unique(x))\n points, cis = [], []\n\n for val in vals:\n\n # Get the point estimate of the y variable\n _y = y[x == val]\n est = self.x_estimator(_y)\n points.append(est)\n\n # Compute the confidence interval for this estimate\n if self.x_ci is None:\n cis.append(None)\n else:\n units = None\n if self.x_ci == \"sd\":\n sd = np.std(_y)\n _ci = est - sd, est + sd\n else:\n if self.units is not None:\n units = self.units[x == val]\n boots = algo.bootstrap(_y,\n func=self.x_estimator,\n n_boot=self.n_boot,\n units=units,\n seed=self.seed)\n _ci = utils.ci(boots, self.x_ci)\n cis.append(_ci)\n\n return vals, points, cis\n\n def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands = utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels 
objects.\"\"\"\n import statsmodels.tools.sm_exceptions as sme\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n err_classes = (sme.PerfectSeparationError,)\n try:\n with warnings.catch_warnings():\n if hasattr(sme, \"PerfectSeparationWarning\"):\n # statsmodels>=0.14.0\n warnings.simplefilter(\"error\", sme.PerfectSeparationWarning)\n err_classes = (*err_classes, sme.PerfectSeparationWarning)\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except err_classes:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and 
linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 163, "name": "x_estimator", "kind": "ref", "category": "function", "info": " est = self.x_estimator(_y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 177, "name": "bootstrap", "kind": "ref", "category": "function", "info": " boots = algo.bootstrap(_y,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 182, "name": "ci", "kind": "ref", "category": "function", "info": " _ci = utils.ci(boots, self.x_ci)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 187, "name": "fit_regression", "kind": "def", "category": "function", "info": " def fit_regression(self, ax=None, x_range=None, grid=None):\n \"\"\"Fit the regression model.\"\"\"\n # Create the grid for the regression\n if grid is None:\n if self.truncate:\n x_min, x_max = self.x_range\n else:\n if ax is None:\n x_min, x_max = x_range\n else:\n x_min, x_max = ax.get_xlim()\n grid = np.linspace(x_min, x_max, 100)\n ci = self.ci\n\n # Fit the regression\n if self.order > 1:\n yhat, yhat_boots = self.fit_poly(grid, self.order)\n elif self.logistic:\n from statsmodels.genmod.generalized_linear_model import GLM\n from statsmodels.genmod.families import Binomial\n yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n family=Binomial())\n elif self.lowess:\n ci = None\n grid, yhat = self.fit_lowess()\n elif self.robust:\n from statsmodels.robust.robust_linear_model import RLM\n yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n elif self.logx:\n yhat, yhat_boots = self.fit_logx(grid)\n else:\n yhat, yhat_boots = self.fit_fast(grid)\n\n # Compute the confidence interval at each grid point\n if ci is None:\n err_bands = None\n else:\n err_bands 
= utils.ci(yhat_boots, ci, axis=0)\n\n return grid, yhat, err_bands\n\n def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.tools.sm_exceptions as sme\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n err_classes = (sme.PerfectSeparationError,)\n try:\n with warnings.catch_warnings():\n if hasattr(sme, \"PerfectSeparationWarning\"):\n # statsmodels>=0.14.0\n warnings.simplefilter(\"error\", sme.PerfectSeparationWarning)\n err_classes = (*err_classes, sme.PerfectSeparationWarning)\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except err_classes:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n\n def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n\n def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n\n def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n\n def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n\n def plot(self, ax, scatter_kws, line_kws):\n \"\"\"Draw the full plot.\"\"\"\n # Insert the plot label into the correct set of keyword arguments\n if self.scatter:\n scatter_kws[\"label\"] = 
self.label\n else:\n line_kws[\"label\"] = self.label\n\n # Use the current color cycle state as a default\n if self.color is None:\n lines, = ax.plot([], [])\n color = lines.get_color()\n lines.remove()\n else:\n color = self.color\n\n # Ensure that color is hex to avoid matplotlib weirdness\n color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n\n # Let color in keyword arguments override overall plot color\n scatter_kws.setdefault(\"color\", color)\n line_kws.setdefault(\"color\", color)\n\n # Draw the constituent plots\n if self.scatter:\n self.scatterplot(ax, scatter_kws)\n\n if self.fit_reg:\n self.lineplot(ax, line_kws)\n\n # Label the axes\n if hasattr(self.x, \"name\"):\n ax.set_xlabel(self.x.name)\n if hasattr(self.y, \"name\"):\n ax.set_ylabel(self.y.name)\n\n def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs)\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get/set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 197, "name": "get_xlim", "kind": "ref", "category": "function", "info": " x_min, x_max = ax.get_xlim()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 203, "name": "fit_poly", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_poly(grid, self.order)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 207, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, GLM,\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 211, "name": "fit_lowess", "kind": "ref", "category": "function", "info": " grid, yhat = self.fit_lowess()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 214, "name": "fit_statsmodels", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_statsmodels(grid, RLM)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 216, "name": "fit_logx", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_logx(grid)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 218, "name": "fit_fast", "kind": "ref", "category": "function", "info": " yhat, yhat_boots = self.fit_fast(grid)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 224, "name": "ci", "kind": "ref", "category": "function", "info": " err_bands = utils.ci(yhat_boots, ci, axis=0)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 228, "name": "fit_fast", "kind": "def", "category": "function", "info": " def fit_fast(self, grid):\n \"\"\"Low-level regression and prediction using linear algebra.\"\"\"\n def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 230, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 231, "name": "pinv", "kind": "ref", "category": "function", "info": " return np.linalg.pinv(_x).dot(_y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 235, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = grid.dot(reg_func(X, y))\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 239, "name": "bootstrap", "kind": "ref", "category": "function", "info": " beta_boots = algo.bootstrap(X, y,\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 247, "name": "fit_poly", "kind": "def", "category": "function", "info": " def fit_poly(self, grid, order):\n \"\"\"Regression using numpy polyfit for higher-order trends.\"\"\"\n def reg_func(_x, _y):\n return np.polyval(np.polyfit(_x, _y, order), grid)\n\n x, y = self.x, self.y\n yhat = reg_func(x, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(x, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 249, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 253, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = reg_func(x, y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 257, "name": "bootstrap", "kind": "ref", "category": "function", "info": " yhat_boots = algo.bootstrap(x, y,\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 264, "name": "fit_statsmodels", "kind": "def", "category": "function", "info": " def fit_statsmodels(self, grid, model, **kwargs):\n \"\"\"More general regression function using statsmodels objects.\"\"\"\n import statsmodels.tools.sm_exceptions as sme\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), grid]\n\n def reg_func(_x, _y):\n err_classes = (sme.PerfectSeparationError,)\n try:\n with warnings.catch_warnings():\n if hasattr(sme, \"PerfectSeparationWarning\"):\n # statsmodels>=0.14.0\n warnings.simplefilter(\"error\", sme.PerfectSeparationWarning)\n err_classes = (*err_classes, sme.PerfectSeparationWarning)\n yhat = model(_y, _x, **kwargs).fit().predict(grid)\n except err_classes:\n yhat = np.empty(len(grid))\n yhat.fill(np.nan)\n return yhat\n\n yhat = reg_func(X, y)\n if self.ci is None:\n return yhat, None\n\n yhat_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed)\n return yhat, yhat_boots\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 270, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 278, "name": "model", "kind": "ref", "category": "function", "info": " yhat = model(_y, _x, **kwargs).fit().predict(grid)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 284, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = reg_func(X, y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 288, "name": "bootstrap", "kind": "ref", "category": "function", "info": " yhat_boots = algo.bootstrap(X, y,\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 295, "name": "fit_lowess", "kind": "def", "category": "function", "info": " def fit_lowess(self):\n \"\"\"Fit a locally-weighted regression, which returns its own grid.\"\"\"\n from statsmodels.nonparametric.smoothers_lowess import lowess\n grid, yhat = lowess(self.y, self.x).T\n return grid, yhat\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 301, "name": "fit_logx", "kind": "def", "category": "function", "info": " def fit_logx(self, grid):\n \"\"\"Fit the model in log-space.\"\"\"\n X, y = np.c_[np.ones(len(self.x)), self.x], self.y\n grid = np.c_[np.ones(len(grid)), np.log(grid)]\n\n def reg_func(_x, _y):\n _x = np.c_[_x[:, 0], np.log(_x[:, 1])]\n return np.linalg.pinv(_x).dot(_y)\n\n yhat = grid.dot(reg_func(X, y))\n if self.ci is None:\n return yhat, None\n\n beta_boots = algo.bootstrap(X, y,\n func=reg_func,\n n_boot=self.n_boot,\n units=self.units,\n seed=self.seed).T\n yhat_boots = grid.dot(beta_boots).T\n return yhat, yhat_boots\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 306, "name": "reg_func", "kind": "def", "category": "function", "info": " def reg_func(_x, _y):\n return np.linalg.pinv(_x).dot(_y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 308, "name": "pinv", "kind": "ref", "category": "function", "info": " return np.linalg.pinv(_x).dot(_y)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 310, "name": "reg_func", "kind": "ref", "category": "function", "info": " yhat = grid.dot(reg_func(X, y))\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 314, "name": "bootstrap", "kind": "ref", "category": "function", "info": " beta_boots = algo.bootstrap(X, y,\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 322, "name": "bin_predictor", "kind": "def", "category": "function", "info": " def bin_predictor(self, bins):\n \"\"\"Discretize a predictor by assigning value to closest bin.\"\"\"\n x = np.asarray(self.x)\n if np.isscalar(bins):\n percentiles = np.linspace(0, 100, bins + 2)[1:-1]\n bins = np.percentile(x, percentiles)\n else:\n bins = np.ravel(bins)\n\n dist = np.abs(np.subtract.outer(x, bins))\n x_binned = bins[np.argmin(dist, axis=1)].ravel()\n\n return x_binned, bins\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 336, "name": "regress_out", "kind": "def", "category": "function", "info": " def regress_out(self, a, b):\n \"\"\"Regress b from a keeping a's original mean.\"\"\"\n a_mean = a.mean()\n a = a - a_mean\n b = b - b.mean()\n b = np.c_[b]\n a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n return np.asarray(a_prime + a_mean).reshape(a.shape)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 342, "name": "pinv", "kind": "ref", "category": "function", "info": " a_prime = a - b.dot(np.linalg.pinv(b).dot(a))\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 356, "name": "get_color", "kind": "ref", "category": "function", "info": " color = lines.get_color()\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 362, "name": "rgb2hex", "kind": "ref", "category": "function", "info": " color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 362, "name": "to_rgb", "kind": "ref", "category": "function", "info": " color = mpl.colors.rgb2hex(mpl.colors.colorConverter.to_rgb(color))\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 370, "name": "scatterplot", "kind": "ref", "category": "function", "info": " self.scatterplot(ax, scatter_kws)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 373, "name": "lineplot", "kind": "ref", "category": "function", "info": " self.lineplot(ax, line_kws)\n"},
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 377, "name": 
"set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(self.x.name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 379, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(self.y.name)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 381, "name": "scatterplot", "kind": "def", "category": "function", "info": " def scatterplot(self, ax, kws):\n \"\"\"Draw the data.\"\"\"\n # Treat the line-based markers specially, explicitly setting larger\n # linewidth than is provided by the seaborn style defaults.\n # This would ideally be handled better in matplotlib (i.e., distinguish\n # between edgewidth for solid glyphs and linewidth for line glyphs\n # but this should do for now.\n line_markers = [\"1\", \"2\", \"3\", \"4\", \"+\", \"x\", \"|\", \"_\"]\n if self.x_estimator is None:\n if \"marker\" in kws and kws[\"marker\"] in line_markers:\n lw = mpl.rcParams[\"lines.linewidth\"]\n else:\n lw = mpl.rcParams[\"lines.markeredgewidth\"]\n kws.setdefault(\"linewidths\", lw)\n\n if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:\n kws.setdefault(\"alpha\", .8)\n\n x, y = self.scatter_data\n ax.scatter(x, y, **kws)\n else:\n # TODO abstraction\n ci_kws = {\"color\": kws[\"color\"]}\n if \"alpha\" in kws:\n ci_kws[\"alpha\"] = kws[\"alpha\"]\n ci_kws[\"linewidth\"] = mpl.rcParams[\"lines.linewidth\"] * 1.75\n kws.setdefault(\"s\", 50)\n\n xs, ys, cis = self.estimate_data\n if [ci for ci in cis if ci is not None]:\n for x, ci in zip(xs, cis):\n ax.plot([x, x], ci, **ci_kws)\n ax.scatter(xs, ys, **kws)\n\n def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 415, "name": "lineplot", "kind": "def", "category": "function", "info": " def lineplot(self, ax, kws):\n \"\"\"Draw the model.\"\"\"\n # Fit the regression model\n grid, yhat, err_bands = self.fit_regression(ax)\n edges = grid[0], grid[-1]\n\n # Get set default aesthetics\n fill_color = kws[\"color\"]\n lw = kws.pop(\"lw\", mpl.rcParams[\"lines.linewidth\"] * 1.5)\n kws.setdefault(\"linewidth\", lw)\n\n # Draw the regression line and confidence interval\n line, = ax.plot(grid, yhat, **kws)\n if not self.truncate:\n line.sticky_edges.x[:] = edges # Prevent mpl from adding margin\n if err_bands is not None:\n ax.fill_between(grid, *err_bands, facecolor=fill_color, alpha=.15)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 418, "name": "fit_regression", "kind": "ref", "category": "function", "info": " grid, yhat, err_bands = self.fit_regression(ax)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 565, "name": "lmplot", "kind": "def", "category": "function", "info": "def lmplot(\n data=None, *,\n x=None, y=None, hue=None, col=None, row=None,\n palette=None, col_wrap=None, height=5, aspect=1, markers=\"o\",\n sharex=None, sharey=None, hue_order=None, col_order=None, row_order=None,\n legend=True, legend_out=None, x_estimator=None, x_bins=None,\n x_ci=\"ci\", scatter=True, fit_reg=True, ci=95, n_boot=1000,\n units=None, seed=None, order=1, logistic=False, lowess=False,\n robust=False, logx=False, x_partial=None, y_partial=None,\n truncate=True, x_jitter=None, y_jitter=None, scatter_kws=None,\n line_kws=None, facet_kws=None,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 581, "name": "facet_kw_deprecation", "kind": "def", "category": "function", "info": " def facet_kw_deprecation(key, val):\n msg = (\n f\"{key} is deprecated from the `lmplot` function signature. \"\n \"Please update your code to pass it using `facet_kws`.\"\n )\n if val is not None:\n warnings.warn(msg, UserWarning)\n facet_kws[key] = val\n\n facet_kw_deprecation(\"sharex\", sharex)\n facet_kw_deprecation(\"sharey\", sharey)\n facet_kw_deprecation(\"legend_out\", legend_out)\n\n if data is None:\n raise TypeError(\"Missing required keyword argument `data`.\")\n\n # Reduce the dataframe to only needed columns\n need_cols = [x, y, hue, col, row, units, x_partial, y_partial]\n cols = np.unique([a for a in need_cols if a is not None]).tolist()\n data = data[cols]\n\n # Initialize the grid\n facets = FacetGrid(\n data, row=row, col=col, hue=hue,\n palette=palette,\n row_order=row_order, col_order=col_order, hue_order=hue_order,\n height=height, aspect=aspect, col_wrap=col_wrap,\n **facet_kws,\n )\n\n # Add the markers here as FacetGrid has figured out how many levels of the\n # hue variable are needed and we don't want to duplicate that process\n if facets.hue_names is None:\n n_markers = 1\n else:\n n_markers = len(facets.hue_names)\n if not isinstance(markers, list):\n markers = [markers] * n_markers\n if len(markers) != n_markers:\n raise ValueError(\"markers must be a singleton or a list of markers \"\n \"for each level of the hue variable\")\n facets.hue_kws = {\"marker\": markers}\n\n def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in [col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 590, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"sharex\", sharex)\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 591, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"sharey\", sharey)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 592, "name": "facet_kw_deprecation", "kind": "ref", "category": "function", "info": " facet_kw_deprecation(\"legend_out\", legend_out)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 599, "name": "tolist", "kind": "ref", "category": "function", "info": " cols = np.unique([a for a in need_cols if a is not None]).tolist()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 603, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " facets = FacetGrid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 624, "name": "update_datalim", "kind": "def", "category": "function", "info": " def update_datalim(data, x, y, ax, **kws):\n xys = data[[x, y]].to_numpy().astype(float)\n ax.update_datalim(xys, updatey=False)\n ax.autoscale_view(scaley=False)\n\n facets.map_dataframe(update_datalim, x=x, y=y)\n\n # Draw the regression plot on each facet\n regplot_kws = dict(\n x_estimator=x_estimator, x_bins=x_bins, x_ci=x_ci,\n scatter=scatter, fit_reg=fit_reg, ci=ci, n_boot=n_boot, units=units,\n seed=seed, order=order, logistic=logistic, lowess=lowess,\n robust=robust, logx=logx, x_partial=x_partial, y_partial=y_partial,\n truncate=truncate, x_jitter=x_jitter, y_jitter=y_jitter,\n scatter_kws=scatter_kws, line_kws=line_kws,\n )\n facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n facets.set_axis_labels(x, y)\n\n # Add a legend\n if legend and (hue is not None) and (hue not in [col, row]):\n facets.add_legend()\n return facets\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 625, "name": "to_numpy", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 625, "name": "astype", "kind": "ref", "category": "function", "info": " xys = data[[x, y]].to_numpy().astype(float)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 626, "name": "update_datalim", "kind": "ref", "category": "function", "info": " ax.update_datalim(xys, updatey=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 627, "name": "autoscale_view", "kind": "ref", "category": "function", "info": " ax.autoscale_view(scaley=False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 629, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(update_datalim, x=x, y=y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": 
"seaborn/regression.py", "line": 640, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " facets.map_dataframe(regplot, x=x, y=y, **regplot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 641, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " facets.set_axis_labels(x, y)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 645, "name": "add_legend", "kind": "ref", "category": "function", "info": " facets.add_legend()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 741, "name": "regplot", "kind": "def", "category": "function", "info": "def regplot(\n data=None, *, x=None, y=None,\n x_estimator=None, x_bins=None, x_ci=\"ci\",\n scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,\n seed=None, order=1, logistic=False, lowess=False, robust=False,\n logx=False, x_partial=None, y_partial=None,\n truncate=True, dropna=True, x_jitter=None, y_jitter=None,\n label=None, color=None, marker=\"o\",\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 752, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 843, "name": "residplot", "kind": "def", "category": "function", "info": "def residplot(\n data=None, *, x=None, y=None,\n x_partial=None, y_partial=None, lowess=False,\n order=1, robust=False, dropna=True, label=None, color=None,\n scatter_kws=None, line_kws=None, ax=None\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 904, "name": "_RegressionPlotter", "kind": "ref", "category": "function", "info": " plotter = _RegressionPlotter(x, y, data, ci=None,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/regression.py", "rel_fname": "seaborn/regression.py", "line": 913, "name": "fit_regression", "kind": "ref", "category": "function", "info": " _, yhat, _ = plotter.fit_regression(grid=plotter.x)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 24, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": "_relational_narrative = DocstringComponents(dict(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 175, "name": "from_nested_components", "kind": "ref", "category": "function", "info": "_param_docs = DocstringComponents.from_nested_components(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 177, "name": "DocstringComponents", "kind": "ref", "category": "function", "info": " facets=DocstringComponents(_facet_docs),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 178, "name": "DocstringComponents", "kind": 
"ref", "category": "function", "info": " rel=DocstringComponents(_relational_docs),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 179, "name": "from_function_params", "kind": "ref", "category": "function", "info": " stat=DocstringComponents.from_function_params(EstimateAggregator.__init__),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 183, "name": "_RelationalPlotter", "kind": "def", "category": "class", "info": "add_legend_data\t_update_legend_data"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 192, "name": "add_legend_data", "kind": "def", "category": "function", "info": " def add_legend_data(self, ax, func=None, common_kws=None, semantic_kws=None):\n \"\"\"Add labeled artists to represent the different plot semantics.\"\"\"\n verbosity = self.legend\n if isinstance(verbosity, str) and verbosity not in [\"auto\", \"brief\", \"full\"]:\n err = \"`legend` must be 'auto', 'brief', 'full', or a boolean.\"\n raise ValueError(err)\n elif verbosity is True:\n verbosity = \"auto\"\n\n keys = []\n legend_kws = {}\n common_kws = {} if common_kws is None else common_kws\n semantic_kws = {} if semantic_kws is None else semantic_kws\n\n # Assign a legend title if there is only going to be one sub-legend,\n # otherwise, subtitles will be inserted into the texts list with an\n # invisible handle (which is a hack)\n titles = {\n title for title in\n (self.variables.get(v, None) for v in [\"hue\", \"size\", \"style\"])\n if title is not None\n }\n title = \"\" if len(titles) != 1 else titles.pop()\n title_kws = dict(\n visible=False, color=\"w\", s=0, linewidth=0, marker=\"\", dashes=\"\"\n )\n\n def update(var_name, val_name, **kws):\n\n key = var_name, val_name\n if key in legend_kws:\n legend_kws[key].update(**kws)\n else:\n keys.append(key)\n legend_kws[key] = dict(**kws)\n\n legend_attrs = {\"hue\": \"color\", \"size\": [\"linewidth\", \"s\"], \"style\": None}\n for var, names in legend_attrs.items():\n self._update_legend_data(\n update, var, verbosity, title, title_kws, names, semantic_kws.get(var),\n )\n\n if func is None:\n func = getattr(ax, self._legend_func)\n\n legend_data = {}\n legend_order = []\n\n for key in keys:\n\n _, label = key\n kws = legend_kws[key]\n kws.setdefault(\"color\", \".2\")\n level_kws = {}\n use_attrs = [\n *self._legend_attributes,\n *common_kws,\n *[attr for var_attrs in semantic_kws.values() for attr in var_attrs],\n ]\n for attr in use_attrs:\n if attr in kws:\n level_kws[attr] = kws[attr]\n artist = func([], [], label=label, **{**common_kws, **level_kws})\n if func.__name__ == \"plot\":\n artist = artist[0]\n legend_data[key] = artist\n legend_order.append(key)\n\n self.legend_title = title\n self.legend_data = legend_data\n self.legend_order = legend_order\n\n def _update_legend_data(\n self,\n update,\n var,\n verbosity,\n title,\n title_kws,\n attr_names,\n other_props,\n ):\n\n brief_ticks = 6\n mapper = getattr(self, f\"_{var}_map\")\n\n brief = mapper.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(mapper.levels) > brief_ticks)\n )\n if brief:\n if isinstance(mapper.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n limits = 
min(mapper.levels), max(mapper.levels)\n levels, formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[var].infer_objects().dtype\n )\n elif mapper.levels is None:\n levels = formatted_levels = []\n else:\n levels = formatted_levels = mapper.levels\n\n if not title and self.variables.get(var, None) is not None:\n update((self.variables[var], \"title\"), self.variables[var], **title_kws)\n\n other_props = {} if other_props is None else other_props\n\n for level, formatted_level in zip(levels, formatted_levels):\n if level is not None:\n attr = mapper(level)\n if isinstance(attr_names, list):\n attr = {name: attr for name in attr_names}\n elif attr_names is not None:\n attr = {attr_names: attr}\n attr.update({k: v[level] for k, v in other_props.items() if level in v})\n update(self.variables[var], formatted_level, **attr)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 230, "name": "_update_legend_data", "kind": "ref", "category": "function", "info": " self._update_legend_data(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 254, "name": "func", "kind": "ref", "category": "function", "info": " artist = func([], [], label=label, **{**common_kws, **level_kws})\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 264, "name": "_update_legend_data", "kind": "def", "category": "function", "info": " def _update_legend_data(\n self,\n update,\n var,\n verbosity,\n title,\n title_kws,\n attr_names,\n other_props,\n ):\n\n brief_ticks = 6\n mapper = getattr(self, f\"_{var}_map\")\n\n brief = mapper.map_type == \"numeric\" and (\n verbosity == \"brief\"\n or (verbosity == \"auto\" and len(mapper.levels) > brief_ticks)\n )\n if brief:\n if isinstance(mapper.norm, mpl.colors.LogNorm):\n locator = mpl.ticker.LogLocator(numticks=brief_ticks)\n else:\n locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)\n limits = min(mapper.levels), max(mapper.levels)\n levels, formatted_levels = locator_to_legend_entries(\n locator, limits, self.plot_data[var].infer_objects().dtype\n )\n elif mapper.levels is None:\n levels = formatted_levels = []\n else:\n levels = formatted_levels = mapper.levels\n\n if not title and self.variables.get(var, None) is not None:\n update((self.variables[var], \"title\"), self.variables[var], **title_kws)\n\n other_props = {} if other_props is None else other_props\n\n for level, formatted_level in zip(levels, formatted_levels):\n if level is not None:\n attr = mapper(level)\n if isinstance(attr_names, list):\n attr = {name: attr for name in attr_names}\n elif attr_names is not None:\n attr = {attr_names: attr}\n attr.update({k: v[level] for k, v in other_props.items() if level in v})\n update(self.variables[var], formatted_level, **attr)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 288, "name": "locator_to_legend_entries", "kind": "ref", "category": "function", "info": " levels, formatted_levels = locator_to_legend_entries(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 289, "name": "infer_objects", "kind": "ref", "category": "function", "info": " locator, limits, 
self.plot_data[var].infer_objects().dtype\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 303, "name": "mapper", "kind": "ref", "category": "function", "info": " attr = mapper(level)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 312, "name": "_LinePlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 370, "name": "EstimateAggregator", "kind": "ref", "category": "function", "info": " agg = EstimateAggregator(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 389, "name": "iter_data", "kind": "ref", "category": "function", "info": " for sub_vars, sub_data in self.iter_data(grouping_vars, from_comp_data=True):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 394, "name": "sort_values", "kind": "ref", "category": "function", "info": " sub_data = sub_data.sort_values(sort_cols)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 404, "name": "groupby", "kind": "ref", "category": "function", "info": " grouped = sub_data.groupby(orient, sort=self.sort)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 407, "name": "apply", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, other).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 407, "name": "reset_index", "kind": "ref", "category": "function", "info": " sub_data = grouped.apply(agg, other).reset_index()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 414, "name": "_log_scaled", "kind": "ref", "category": "function", "info": " if self._log_scaled(var):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 422, "name": "groupby", "kind": "ref", "category": "function", "info": " for _, unit_data in sub_data.groupby(\"units\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 430, "name": "set_color", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 430, "name": "_hue_map", "kind": "ref", "category": "function", "info": " line.set_color(self._hue_map(sub_vars[\"hue\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 433, "name": "set_linewidth", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": 
"seaborn/relational.py", "line": 433, "name": "_size_map", "kind": "ref", "category": "function", "info": " line.set_linewidth(self._size_map(sub_vars[\"size\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 436, "name": "_style_map", "kind": "ref", "category": "function", "info": " attributes = self._style_map(sub_vars[\"style\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 438, "name": "set_dashes", "kind": "ref", "category": "function", "info": " line.set_dashes(attributes[\"dashes\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 440, "name": "set_marker", "kind": "ref", "category": "function", "info": " line.set_marker(attributes[\"marker\"])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 442, "name": "get_color", "kind": "ref", "category": "function", "info": " line_color = line.get_color()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 443, "name": "get_alpha", "kind": "ref", "category": "function", "info": " line_alpha = line.get_alpha()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 444, "name": "get_solid_capstyle", "kind": "ref", "category": "function", "info": " line_capstyle = line.get_solid_capstyle()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 455, "name": "func", "kind": "ref", "category": "function", "info": " func(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 476, "name": "get_children", "kind": "ref", "category": "function", "info": " for obj in ebars.get_children():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 478, "name": "set_capstyle", "kind": "ref", "category": "function", "info": " obj.set_capstyle(line_capstyle)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 481, "name": "_add_axis_labels", "kind": "ref", "category": "function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 483, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 484, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 487, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 490, "name": "_ScatterPlotter", "kind": "def", "category": "class", "info": "__init__\tplot"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 512, "name": "dropna", "kind": "ref", "category": "function", "info": " data = self.plot_data.dropna()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 526, "name": "_style_map", "kind": "ref", "category": "function", "info": " example_marker = self._style_map(example_level, \"marker\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 534, "name": "MarkerStyle", "kind": "ref", "category": "function", "info": " m = mpl.markers.MarkerStyle(m)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 535, "name": "is_filled", "kind": "ref", "category": "function", "info": " if m.is_filled():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 544, "name": "set_facecolors", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 544, "name": "_hue_map", "kind": "ref", "category": "function", "info": " points.set_facecolors(self._hue_map(data[\"hue\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 547, "name": "set_sizes", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 547, "name": "_size_map", "kind": "ref", "category": "function", "info": " points.set_sizes(self._size_map(data[\"size\"]))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 550, "name": "_style_map", "kind": "ref", "category": "function", "info": " p = [self._style_map(val, \"path\") for val in data[\"style\"]]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 551, "name": "set_paths", "kind": "ref", "category": "function", "info": " points.set_paths(p)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 556, "name": "get_sizes", "kind": "ref", "category": "function", "info": " sizes = points.get_sizes()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 557, "name": "set_linewidths", "kind": "ref", "category": "function", "info": " points.set_linewidths(.08 * np.sqrt(np.percentile(sizes, 10)))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 560, "name": "_add_axis_labels", "kind": "ref", "category": 
"function", "info": " self._add_axis_labels(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 562, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " self.add_legend_data(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 563, "name": "get_legend_handles_labels", "kind": "ref", "category": "function", "info": " handles, _ = ax.get_legend_handles_labels()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 566, "name": "adjust_legend_subtitles", "kind": "ref", "category": "function", "info": " adjust_legend_subtitles(legend)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 569, "name": "lineplot", "kind": "def", "category": "function", "info": "def lineplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n dashes=True, markers=None, style_order=None,\n estimator=\"mean\", errorbar=(\"ci\", 95), n_boot=1000, seed=None,\n orient=\"x\", sort=True, err_style=\"band\", err_kws=None,\n legend=\"auto\", ci=\"deprecated\", ax=None, **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 581, "name": "_deprecate_ci", "kind": "ref", "category": "function", "info": " errorbar = _deprecate_ci(errorbar, ci)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 583, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = _LinePlotter.get_semantics(locals())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 584, "name": "_LinePlotter", "kind": "ref", "category": "function", "info": " p = _LinePlotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 591, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 592, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 593, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, dashes=dashes, order=style_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 604, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 609, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = 
_default_color(ax.plot, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 698, "name": "scatterplot", "kind": "def", "category": "function", "info": "def scatterplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=True, style_order=None, legend=\"auto\", ax=None,\n **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 707, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables = _ScatterPlotter.get_semantics(locals())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 708, "name": "_ScatterPlotter", "kind": "ref", "category": "function", "info": " p = _ScatterPlotter(data=data, variables=variables, legend=legend)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 710, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 711, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 712, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, order=style_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 720, "name": "_attach", "kind": "ref", "category": "function", "info": " p._attach(ax)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 725, "name": "_default_color", "kind": "ref", "category": "function", "info": " kwargs[\"color\"] = _default_color(ax.scatter, hue, color, kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 791, "name": "relplot", "kind": "def", "category": "function", "info": "def relplot(\n data=None, *,\n x=None, y=None, hue=None, size=None, style=None, units=None,\n row=None, col=None, col_wrap=None, row_order=None, col_order=None,\n palette=None, hue_order=None, hue_norm=None,\n sizes=None, size_order=None, size_norm=None,\n markers=None, dashes=None, style_order=None,\n legend=\"auto\", kind=\"scatter\", height=5, aspect=1, facet_kws=None,\n **kwargs\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 828, "name": "plotter", "kind": "ref", "category": "function", "info": " p = plotter(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 830, "name": "get_semantics", "kind": "ref", "category": "function", "info": " variables=plotter.get_semantics(locals()),\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 833, "name": "map_hue", "kind": "ref", "category": "function", "info": " p.map_hue(palette=palette, order=hue_order, norm=hue_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 834, "name": "map_size", "kind": "ref", "category": "function", "info": " p.map_size(sizes=sizes, order=size_order, norm=size_norm)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 835, "name": "map_style", "kind": "ref", "category": "function", "info": " p.map_style(markers=markers, dashes=dashes, order=style_order)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 853, "name": "_style_map", "kind": "ref", "category": "function", "info": " markers = {k: p._style_map(k, \"marker\") for k in style_order}\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 857, "name": "_style_map", "kind": "ref", "category": "function", "info": " dashes = {k: p._style_map(k, \"dashes\") for k in style_order}\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 882, "name": "assign_variables", "kind": "ref", "category": "function", "info": " p.assign_variables(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 908, "name": "rename", "kind": "ref", "category": "function", "info": " full_data = p.plot_data.rename(columns=new_cols)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 912, "name": "FacetGrid", "kind": "ref", "category": "function", "info": " g = FacetGrid(\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 913, "name": "dropna", "kind": "ref", "category": "function", "info": " data=full_data.dropna(axis=1, how=\"all\"),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 921, "name": "map_dataframe", "kind": "ref", "category": "function", "info": " g.map_dataframe(func, **plot_kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 925, "name": "set_axis_labels", "kind": "ref", "category": "function", "info": " g.set_axis_labels(variables.get(\"x\") or \"\", variables.get(\"y\") or \"\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 932, "name": "add_legend_data", "kind": "ref", "category": "function", "info": " p.add_legend_data(g.axes.flat[0])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": "seaborn/relational.py", "line": 934, "name": "add_legend", "kind": "ref", "category": "function", "info": " g.add_legend(legend_data=p.legend_data,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/relational.py", "rel_fname": 
"seaborn/relational.py", "line": 944, "name": "rename", "kind": "ref", "category": "function", "info": " grid_data = g.data.rename(columns=orig_cols)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 27, "name": "ci_to_errsize", "kind": "def", "category": "function", "info": "def ci_to_errsize(cis, heights):\n \"\"\"Convert intervals to error arguments relative to plot heights.\n\n Parameters\n ----------\n cis : 2 x n sequence\n sequence of confidence interval limits\n heights : n sequence\n sequence of plot heights\n\n Returns\n -------\n errsize : 2 x n array\n sequence of error size relative to height values in correct\n format as argument for plt.bar\n\n \"\"\"\n cis = np.atleast_2d(cis).reshape(2, -1)\n heights = np.atleast_1d(heights)\n errsize = []\n for i, (low, high) in enumerate(np.transpose(cis)):\n h = heights[i]\n elow = h - low\n ehigh = high - h\n errsize.append([elow, ehigh])\n\n errsize = np.asarray(errsize).T\n return errsize\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 57, "name": "_normal_quantile_func", "kind": "def", "category": "function", "info": "def _normal_quantile_func(q):\n \"\"\"\n Compute the quantile function of the standard normal distribution.\n\n This wrapper exists because we are dropping scipy as a mandatory dependency\n but statistics.NormalDist was added to the standard library in 3.8.\n\n \"\"\"\n try:\n from statistics import NormalDist\n qf = np.vectorize(NormalDist().inv_cdf)\n except ImportError:\n try:\n from scipy.stats import norm\n qf = norm.ppf\n except ImportError:\n msg = (\n \"Standard normal quantile functions require either Python>=3.8 or scipy\"\n )\n raise RuntimeError(msg)\n return qf(q)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 77, "name": "qf", "kind": "ref", "category": "function", "info": " return qf(q)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 80, "name": "_draw_figure", "kind": "def", "category": "function", "info": "def _draw_figure(fig):\n \"\"\"Force draw of a matplotlib figure, accounting for back-compat.\"\"\"\n # See https://github.com/matplotlib/matplotlib/issues/19197 for context\n fig.canvas.draw()\n if fig.stale:\n try:\n fig.draw(fig.canvas.get_renderer())\n except AttributeError:\n pass\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 86, "name": "get_renderer", "kind": "ref", "category": "function", "info": " fig.draw(fig.canvas.get_renderer())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 91, "name": "_default_color", "kind": "def", "category": "function", "info": "def _default_color(method, hue, color, kws, saturation=1):\n \"\"\"If needed, get a default color by using the matplotlib property cycle.\"\"\"\n\n if hue is not None:\n # This warning is probably user-friendly, but it's currently triggered\n # in a FacetGrid context and I don't want to mess with that logic right now\n # if color is not None:\n # msg = \"`color` is ignored when `hue` is assigned.\"\n # warnings.warn(msg)\n return None\n\n kws = kws.copy()\n kws.pop(\"label\", None)\n\n if color is not None:\n if saturation < 1:\n 
color = desaturate(color, saturation)\n return color\n\n elif method.__name__ == \"plot\":\n\n color = _normalize_kwargs(kws, mpl.lines.Line2D).get(\"color\")\n scout, = method([], [], scalex=False, scaley=False, color=color)\n color = scout.get_color()\n scout.remove()\n\n elif method.__name__ == \"scatter\":\n\n # Matplotlib will raise if the size of x/y don't match s/c,\n # and the latter might be in the kws dict\n scout_size = max(\n np.atleast_1d(kws.get(key, [])).shape[0]\n for key in [\"s\", \"c\", \"fc\", \"facecolor\", \"facecolors\"]\n )\n scout_x = scout_y = np.full(scout_size, np.nan)\n\n scout = method(scout_x, scout_y, **kws)\n facecolors = scout.get_facecolors()\n\n if not len(facecolors):\n # Handle bug in matplotlib <= 3.2 (I think)\n # This will limit the ability to use non color= kwargs to specify\n # a color in versions of matplotlib with the bug, but trying to\n # work out what the user wanted by re-implementing the broken logic\n # of inspecting the kwargs is probably too brittle.\n single_color = False\n else:\n single_color = np.unique(facecolors, axis=0).shape[0] == 1\n\n # Allow the user to specify an array of colors through various kwargs\n if \"c\" not in kws and single_color:\n color = to_rgb(facecolors[0])\n\n scout.remove()\n\n elif method.__name__ == \"bar\":\n\n # bar() needs masked, not empty data, to generate a patch\n scout, = method([np.nan], [np.nan], **kws)\n color = to_rgb(scout.get_facecolor())\n scout.remove()\n # Axes.bar adds both a patch and a container\n method.__self__.containers.pop(-1)\n\n elif method.__name__ == \"fill_between\":\n\n kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n scout = method([], [], **kws)\n facecolor = scout.get_facecolor()\n color = to_rgb(facecolor[0])\n scout.remove()\n\n if saturation < 1:\n color = desaturate(color, saturation)\n\n return color\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 107, "name": "desaturate", "kind": "ref", "category": "function", "info": " color = desaturate(color, saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 112, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " color = _normalize_kwargs(kws, mpl.lines.Line2D).get(\"color\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 113, "name": "method", "kind": "ref", "category": "function", "info": " scout, = method([], [], scalex=False, scaley=False, color=color)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 114, "name": "get_color", "kind": "ref", "category": "function", "info": " color = scout.get_color()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 127, "name": "method", "kind": "ref", "category": "function", "info": " scout = method(scout_x, scout_y, **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 128, "name": "get_facecolors", "kind": "ref", "category": "function", "info": " facecolors = scout.get_facecolors()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 149, "name": "method", "kind": 
"ref", "category": "function", "info": " scout, = method([np.nan], [np.nan], **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 150, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " color = to_rgb(scout.get_facecolor())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 157, "name": "_normalize_kwargs", "kind": "ref", "category": "function", "info": " kws = _normalize_kwargs(kws, mpl.collections.PolyCollection)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 158, "name": "method", "kind": "ref", "category": "function", "info": " scout = method([], [], **kws)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 159, "name": "get_facecolor", "kind": "ref", "category": "function", "info": " facecolor = scout.get_facecolor()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 164, "name": "desaturate", "kind": "ref", "category": "function", "info": " color = desaturate(color, saturation)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 169, "name": "desaturate", "kind": "def", "category": "function", "info": "def desaturate(color, prop):\n \"\"\"Decrease the saturation channel of a color by some percent.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n prop : float\n saturation channel of color will be multiplied by this value\n\n Returns\n -------\n new_color : rgb tuple\n desaturated color code in RGB tuple representation\n\n \"\"\"\n # Check inputs\n if not 0 <= prop <= 1:\n raise ValueError(\"prop must be between 0 and 1\")\n\n # Get rgb tuple rep\n rgb = to_rgb(color)\n\n # Short circuit to avoid floating point issues\n if prop == 1:\n return rgb\n\n # Convert to hls\n h, l, s = colorsys.rgb_to_hls(*rgb)\n\n # Desaturate the saturation channel\n s *= prop\n\n # Convert back to rgb\n new_color = colorsys.hls_to_rgb(h, l, s)\n\n return new_color\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 208, "name": "saturate", "kind": "def", "category": "function", "info": "def saturate(color):\n \"\"\"Return a fully saturated color with the same hue.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n\n Returns\n -------\n new_color : rgb tuple\n saturated color code in RGB tuple representation\n\n \"\"\"\n return set_hls_values(color, s=1)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 222, "name": "set_hls_values", "kind": "ref", "category": "function", "info": " return set_hls_values(color, s=1)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 225, "name": "set_hls_values", "kind": "def", "category": "function", "info": "def set_hls_values(color, h=None, l=None, s=None): # noqa\n \"\"\"Independently manipulate the h, l, or s channels of a color.\n\n Parameters\n ----------\n color : matplotlib color\n hex, rgb-tuple, or html color name\n h, l, s 
: floats between 0 and 1, or None\n new values for each channel in hls space\n\n Returns\n -------\n new_color : rgb tuple\n new color code in RGB tuple representation\n\n \"\"\"\n # Get an RGB tuple representation\n rgb = to_rgb(color)\n vals = list(colorsys.rgb_to_hls(*rgb))\n for i, val in enumerate([h, l, s]):\n if val is not None:\n vals[i] = val\n\n rgb = colorsys.hls_to_rgb(*vals)\n return rgb\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 252, "name": "axlabel", "kind": "def", "category": "function", "info": "def axlabel(xlabel, ylabel, **kwargs):\n \"\"\"Grab current axis and label it.\n\n DEPRECATED: will be removed in a future version.\n\n \"\"\"\n msg = \"This function is deprecated and will be removed in a future version\"\n warnings.warn(msg, FutureWarning)\n ax = plt.gca()\n ax.set_xlabel(xlabel, **kwargs)\n ax.set_ylabel(ylabel, **kwargs)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 261, "name": "set_xlabel", "kind": "ref", "category": "function", "info": " ax.set_xlabel(xlabel, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 262, "name": "set_ylabel", "kind": "ref", "category": "function", "info": " ax.set_ylabel(ylabel, **kwargs)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 265, "name": "remove_na", "kind": "def", "category": "function", "info": "def remove_na(vector):\n \"\"\"Helper method for removing null values from data vectors.\n\n Parameters\n ----------\n vector : vector object\n Must implement boolean masking with [] subscript syntax.\n\n Returns\n -------\n clean : same type as ``vector``\n Vector of data with null values removed. May be a copy or a view.\n\n \"\"\"\n return vector[pd.notnull(vector)]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 282, "name": "get_color_cycle", "kind": "def", "category": "function", "info": "def get_color_cycle():\n \"\"\"Return the list of colors in the current matplotlib color cycle\n\n Parameters\n ----------\n None\n\n Returns\n -------\n colors : list\n List of matplotlib colors in the current cycle, or dark gray if\n the current color cycle is empty.\n \"\"\"\n cycler = mpl.rcParams['axes.prop_cycle']\n return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 296, "name": "by_key", "kind": "ref", "category": "function", "info": " return cycler.by_key()['color'] if 'color' in cycler.keys else [\".15\"]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 299, "name": "despine", "kind": "def", "category": "function", "info": "def despine(fig=None, ax=None, top=True, right=True, left=False,\n bottom=False, offset=None, trim=False):\n \"\"\"Remove the top and right spines from plot(s).\n\n fig : matplotlib figure, optional\n Figure to despine all axes of, defaults to the current figure.\n ax : matplotlib axes, optional\n Specific axes object to despine. 
Ignored if fig is provided.\n top, right, left, bottom : boolean, optional\n If True, remove that spine.\n offset : int or dict, optional\n Absolute distance, in points, spines should be moved away\n from the axes (negative values move spines inward). A single value\n applies to all spines; a dict can be used to set offset values per\n side.\n trim : bool, optional\n If True, limit spines to the smallest and largest major tick\n on each non-despined axis.\n\n Returns\n -------\n None\n\n \"\"\"\n # Get references to the axes we want\n if fig is None and ax is None:\n axes = plt.gcf().axes\n elif fig is not None:\n axes = fig.axes\n elif ax is not None:\n axes = [ax]\n\n for ax_i in axes:\n for side in [\"top\", \"right\", \"left\", \"bottom\"]:\n # Toggle the spine objects\n is_visible = not locals()[side]\n ax_i.spines[side].set_visible(is_visible)\n if offset is not None and is_visible:\n try:\n val = offset.get(side, 0)\n except AttributeError:\n val = offset\n ax_i.spines[side].set_position(('outward', val))\n\n # Potentially move the ticks\n if left and not right:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.yaxis.minorTicks\n )\n ax_i.yaxis.set_ticks_position(\"right\")\n for t in ax_i.yaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.yaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if bottom and not top:\n maj_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.majorTicks\n )\n min_on = any(\n t.tick1line.get_visible()\n for t in ax_i.xaxis.minorTicks\n )\n ax_i.xaxis.set_ticks_position(\"top\")\n for t in ax_i.xaxis.majorTicks:\n t.tick2line.set_visible(maj_on)\n for t in ax_i.xaxis.minorTicks:\n t.tick2line.set_visible(min_on)\n\n if trim:\n # clip off the parts of the spines that extend past major ticks\n xticks = np.asarray(ax_i.get_xticks())\n if xticks.size:\n firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n xticks)[0]\n lasttick = np.compress(xticks <= max(ax_i.get_xlim()),\n xticks)[-1]\n ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n ax_i.spines['top'].set_bounds(firsttick, lasttick)\n newticks = xticks.compress(xticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_xticks(newticks)\n\n yticks = np.asarray(ax_i.get_yticks())\n if yticks.size:\n firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n yticks)[0]\n lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n yticks)[-1]\n ax_i.spines['left'].set_bounds(firsttick, lasttick)\n ax_i.spines['right'].set_bounds(firsttick, lasttick)\n newticks = yticks.compress(yticks <= lasttick)\n newticks = newticks.compress(newticks >= firsttick)\n ax_i.set_yticks(newticks)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 335, "name": "set_visible", "kind": "ref", "category": "function", "info": " ax_i.spines[side].set_visible(is_visible)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 341, "name": "set_position", "kind": "ref", "category": "function", "info": " ax_i.spines[side].set_position(('outward', val))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 346, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 350, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 353, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax_i.yaxis.set_ticks_position(\"right\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 355, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(maj_on)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 357, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(min_on)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 361, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 365, "name": "get_visible", "kind": "ref", "category": "function", "info": " t.tick1line.get_visible()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 368, "name": "set_ticks_position", "kind": "ref", "category": "function", "info": " ax_i.xaxis.set_ticks_position(\"top\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 370, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(maj_on)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 372, "name": "set_visible", "kind": "ref", "category": "function", "info": " t.tick2line.set_visible(min_on)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 376, "name": "get_xticks", "kind": "ref", "category": "function", "info": " xticks = np.asarray(ax_i.get_xticks())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 378, "name": "get_xlim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(xticks >= min(ax_i.get_xlim()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 380, "name": "get_xlim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(xticks <= max(ax_i.get_xlim()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 382, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['bottom'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 383, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['top'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", 
"rel_fname": "seaborn/utils.py", "line": 386, "name": "set_xticks", "kind": "ref", "category": "function", "info": " ax_i.set_xticks(newticks)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 388, "name": "get_yticks", "kind": "ref", "category": "function", "info": " yticks = np.asarray(ax_i.get_yticks())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 390, "name": "get_ylim", "kind": "ref", "category": "function", "info": " firsttick = np.compress(yticks >= min(ax_i.get_ylim()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 392, "name": "get_ylim", "kind": "ref", "category": "function", "info": " lasttick = np.compress(yticks <= max(ax_i.get_ylim()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 394, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['left'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 395, "name": "set_bounds", "kind": "ref", "category": "function", "info": " ax_i.spines['right'].set_bounds(firsttick, lasttick)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 398, "name": "set_yticks", "kind": "ref", "category": "function", "info": " ax_i.set_yticks(newticks)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 401, "name": "move_legend", "kind": "def", "category": "function", "info": "def move_legend(obj, loc, **kwargs):\n \"\"\"\n Recreate a plot's legend at a new location.\n\n The name is a slight misnomer. Matplotlib legends do not expose public\n control over their position parameters. So this function creates a new legend,\n copying over the data from the original object, which is then removed.\n\n Parameters\n ----------\n obj : the object with the plot\n This argument can be either a seaborn or matplotlib object:\n\n - :class:`seaborn.FacetGrid` or :class:`seaborn.PairGrid`\n - :class:`matplotlib.axes.Axes` or :class:`matplotlib.figure.Figure`\n\n loc : str or int\n Location argument, as in :meth:`matplotlib.axes.Axes.legend`.\n\n kwargs\n Other keyword arguments are passed to :meth:`matplotlib.axes.Axes.legend`.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/move_legend.rst\n\n \"\"\"\n # This is a somewhat hackish solution that will hopefully be obviated by\n # upstream improvements to matplotlib legends that make them easier to\n # modify after creation.\n\n from seaborn.axisgrid import Grid # Avoid circular import\n\n # Locate the legend object and a method to recreate the legend\n if isinstance(obj, Grid):\n old_legend = obj.legend\n legend_func = obj.figure.legend\n elif isinstance(obj, mpl.axes.Axes):\n old_legend = obj.legend_\n legend_func = obj.legend\n elif isinstance(obj, mpl.figure.Figure):\n if obj.legends:\n old_legend = obj.legends[-1]\n else:\n old_legend = None\n legend_func = obj.legend\n else:\n err = \"`obj` must be a seaborn Grid or matplotlib Axes or Figure instance.\"\n raise TypeError(err)\n\n if old_legend is None:\n err = f\"{obj} has no legend attached.\"\n raise ValueError(err)\n\n # Extract the components of the legend we need to reuse\n # Import here to avoid a circular import\n from seaborn._compat import get_legend_handles\n handles = get_legend_handles(old_legend)\n labels = [t.get_text() for t in old_legend.get_texts()]\n\n # Extract legend properties that can be passed to the recreation method\n # (Vexingly, these don't all round-trip)\n legend_kws = inspect.signature(mpl.legend.Legend).parameters\n props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n\n # Delegate default bbox_to_anchor rules to matplotlib\n props.pop(\"bbox_to_anchor\")\n\n # Try to propagate the existing title and font properties; respect new ones too\n title = props.pop(\"title\")\n if \"title\" in kwargs:\n title.set_text(kwargs.pop(\"title\"))\n title_kwargs = {k: v for k, v in kwargs.items() if k.startswith(\"title_\")}\n for key, val in title_kwargs.items():\n title.set(**{key[6:]: val})\n kwargs.pop(key)\n\n # Try to respect the frame visibility\n kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n\n # Remove the old legend and create the new one\n props.update(kwargs)\n old_legend.remove()\n new_legend = legend_func(handles, labels, loc=loc, **props)\n new_legend.set_title(title.get_text(), title.get_fontproperties())\n\n # Let the Grid object continue to track the correct legend object\n if isinstance(obj, Grid):\n obj._legend = new_legend\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 459, "name": "get_legend_handles", "kind": "ref", "category": "function", "info": " handles = get_legend_handles(old_legend)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 460, "name": "get_text", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 460, "name": "get_texts", "kind": "ref", "category": "function", "info": " labels = [t.get_text() for t in old_legend.get_texts()]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 465, "name": "properties", "kind": "ref", "category": "function", "info": " props = {k: v for k, v in old_legend.properties().items() if k in legend_kws}\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 473, "name": "set_text", "kind": "ref", 
"category": "function", "info": " title.set_text(kwargs.pop(\"title\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 480, "name": "get_visible", "kind": "ref", "category": "function", "info": " kwargs.setdefault(\"frameon\", old_legend.legendPatch.get_visible())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 485, "name": "legend_func", "kind": "ref", "category": "function", "info": " new_legend = legend_func(handles, labels, loc=loc, **props)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 486, "name": "set_title", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 486, "name": "get_text", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 486, "name": "get_fontproperties", "kind": "ref", "category": "function", "info": " new_legend.set_title(title.get_text(), title.get_fontproperties())\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 493, "name": "_kde_support", "kind": "def", "category": "function", "info": "def _kde_support(data, bw, gridsize, cut, clip):\n \"\"\"Establish support for a kernel density estimate.\"\"\"\n support_min = max(data.min() - bw * cut, clip[0])\n support_max = min(data.max() + bw * cut, clip[1])\n support = np.linspace(support_min, support_max, gridsize)\n\n return support\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 502, "name": "ci", "kind": "def", "category": "function", "info": "def ci(a, which=95, axis=None):\n \"\"\"Return a percentile range from an array of values.\"\"\"\n p = 50 - which / 2, 50 + which / 2\n return np.nanpercentile(a, p, axis)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 508, "name": "get_dataset_names", "kind": "def", "category": "function", "info": "def get_dataset_names():\n \"\"\"Report available example datasets, useful for reporting issues.\n\n Requires an internet connection.\n\n \"\"\"\n with urlopen(DATASET_NAMES_URL) as resp:\n txt = resp.read()\n\n dataset_names = [name.strip() for name in txt.decode().split(\"\\n\")]\n return list(filter(None, dataset_names))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 517, "name": "decode", "kind": "ref", "category": "function", "info": " dataset_names = [name.strip() for name in txt.decode().split(\"\\n\")]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 521, "name": "get_data_home", "kind": "def", "category": "function", "info": "def get_data_home(data_home=None):\n \"\"\"Return a path to the cache directory for example datasets.\n\n This directory is used by :func:`load_dataset`.\n\n If the ``data_home`` argument is 
not provided, it will use a directory\n specified by the `SEABORN_DATA` environment variable (if it exists)\n or otherwise default to an OS-appropriate user cache location.\n\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n data_home = os.path.expanduser(data_home)\n if not os.path.exists(data_home):\n os.makedirs(data_home)\n return data_home\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 532, "name": "user_cache_dir", "kind": "ref", "category": "function", "info": " data_home = os.environ.get(\"SEABORN_DATA\", user_cache_dir(\"seaborn\"))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 533, "name": "expanduser", "kind": "ref", "category": "function", "info": " data_home = os.path.expanduser(data_home)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 534, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(data_home):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 539, "name": "load_dataset", "kind": "def", "category": "function", "info": "def load_dataset(name, cache=True, data_home=None, **kws):\n \"\"\"Load an example dataset from the online repository (requires internet).\n\n This function provides quick access to a small number of example datasets\n that are useful for documenting seaborn or generating reproducible examples\n for bug reports. It is not necessary for normal usage.\n\n Note that some of the datasets have a small amount of preprocessing applied\n to define a proper ordering for categorical variables.\n\n Use :func:`get_dataset_names` to see a list of available datasets.\n\n Parameters\n ----------\n name : str\n Name of the dataset (``{name}.csv`` on\n https://github.com/mwaskom/seaborn-data).\n cache : boolean, optional\n If True, try to load from the local cache first, and save to the cache\n if a download is required.\n data_home : string, optional\n The directory in which to cache data; see :func:`get_data_home`.\n kws : keys and values, optional\n Additional keyword arguments are passed through to\n :func:`pandas.read_csv`.\n\n Returns\n -------\n df : :class:`pandas.DataFrame`\n Tabular data, possibly with some preprocessing applied.\n\n \"\"\"\n # A common beginner mistake is to assume that one's personal data needs\n # to be passed through this function to be usable with seaborn.\n # Let's provide a more helpful error than you would otherwise get.\n if isinstance(name, pd.DataFrame):\n err = (\n \"This function accepts only strings (the name of an example dataset). \"\n \"You passed a pandas DataFrame. 
If you have your own dataset, \"\n \"it is not necessary to use this function before plotting.\"\n )\n raise TypeError(err)\n\n url = f\"{DATASET_SOURCE}/{name}.csv\"\n\n if cache:\n cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n if not os.path.exists(cache_path):\n if name not in get_dataset_names():\n raise ValueError(f\"'{name}' is not one of the example datasets.\")\n urlretrieve(url, cache_path)\n full_path = cache_path\n else:\n full_path = url\n\n df = pd.read_csv(full_path, **kws)\n\n if df.iloc[-1].isnull().all():\n df = df.iloc[:-1]\n\n # Set some columns as a categorical type with ordered levels\n\n if name == \"tips\":\n df[\"day\"] = pd.Categorical(df[\"day\"], [\"Thur\", \"Fri\", \"Sat\", \"Sun\"])\n df[\"sex\"] = pd.Categorical(df[\"sex\"], [\"Male\", \"Female\"])\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"Lunch\", \"Dinner\"])\n df[\"smoker\"] = pd.Categorical(df[\"smoker\"], [\"Yes\", \"No\"])\n\n elif name == \"flights\":\n months = df[\"month\"].str[:3]\n df[\"month\"] = pd.Categorical(months, months.unique())\n\n elif name == \"exercise\":\n df[\"time\"] = pd.Categorical(df[\"time\"], [\"1 min\", \"15 min\", \"30 min\"])\n df[\"kind\"] = pd.Categorical(df[\"kind\"], [\"rest\", \"walking\", \"running\"])\n df[\"diet\"] = pd.Categorical(df[\"diet\"], [\"no fat\", \"low fat\"])\n\n elif name == \"titanic\":\n df[\"class\"] = pd.Categorical(df[\"class\"], [\"First\", \"Second\", \"Third\"])\n df[\"deck\"] = pd.Categorical(df[\"deck\"], list(\"ABCDEFG\"))\n\n elif name == \"penguins\":\n df[\"sex\"] = df[\"sex\"].str.title()\n\n elif name == \"diamonds\":\n df[\"color\"] = pd.Categorical(\n df[\"color\"], [\"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\"],\n )\n df[\"clarity\"] = pd.Categorical(\n df[\"clarity\"], [\"IF\", \"VVS1\", \"VVS2\", \"VS1\", \"VS2\", \"SI1\", \"SI2\", \"I1\"],\n )\n df[\"cut\"] = pd.Categorical(\n df[\"cut\"], [\"Ideal\", \"Premium\", \"Very Good\", \"Good\", \"Fair\"],\n )\n\n elif name == \"taxis\":\n df[\"pickup\"] = pd.to_datetime(df[\"pickup\"])\n df[\"dropoff\"] = pd.to_datetime(df[\"dropoff\"])\n\n elif name == \"seaice\":\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n elif name == \"dowjones\":\n df[\"Date\"] = pd.to_datetime(df[\"Date\"])\n\n return df\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 585, "name": "get_data_home", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 585, "name": "basename", "kind": "ref", "category": "function", "info": " cache_path = os.path.join(get_data_home(data_home), os.path.basename(url))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 586, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(cache_path):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 587, "name": "get_dataset_names", "kind": "ref", "category": "function", "info": " if name not in get_dataset_names():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 647, "name": "axis_ticklabels_overlap", "kind": "def", "category": "function", 
"info": "def axis_ticklabels_overlap(labels):\n \"\"\"Return a boolean for whether the list of ticklabels have overlaps.\n\n Parameters\n ----------\n labels : list of matplotlib ticklabels\n\n Returns\n -------\n overlap : boolean\n True if any of the labels overlap.\n\n \"\"\"\n if not labels:\n return False\n try:\n bboxes = [l.get_window_extent() for l in labels]\n overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n return max(overlaps) > 1\n except RuntimeError:\n # Issue on macos backend raises an error in the above code\n return False\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 663, "name": "get_window_extent", "kind": "ref", "category": "function", "info": " bboxes = [l.get_window_extent() for l in labels]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 664, "name": "count_overlaps", "kind": "ref", "category": "function", "info": " overlaps = [b.count_overlaps(bboxes) for b in bboxes]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 671, "name": "axes_ticklabels_overlap", "kind": "def", "category": "function", "info": "def axes_ticklabels_overlap(ax):\n \"\"\"Return booleans for whether the x and y ticklabels on an Axes overlap.\n\n Parameters\n ----------\n ax : matplotlib Axes\n\n Returns\n -------\n x_overlap, y_overlap : booleans\n True when the labels on that axis overlap.\n\n \"\"\"\n return (axis_ticklabels_overlap(ax.get_xticklabels()),\n axis_ticklabels_overlap(ax.get_yticklabels()))\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 684, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 684, "name": "get_xticklabels", "kind": "ref", "category": "function", "info": " return (axis_ticklabels_overlap(ax.get_xticklabels()),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 685, "name": "axis_ticklabels_overlap", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 685, "name": "get_yticklabels", "kind": "ref", "category": "function", "info": " axis_ticklabels_overlap(ax.get_yticklabels()))\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 688, "name": "locator_to_legend_entries", "kind": "def", "category": "function", "info": "def locator_to_legend_entries(locator, limits, dtype):\n \"\"\"Return levels and formatted levels for brief numeric legends.\"\"\"\n raw_levels = locator.tick_values(*limits).astype(dtype)\n\n # The locator can return ticks outside the limits, clip them here\n raw_levels = [l for l in raw_levels if l >= limits[0] and l <= limits[1]]\n\n class dummy_axis:\n def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n else:\n formatter = mpl.ticker.ScalarFormatter()\n # Avoid having an 
offset/scientific notation which we don't currently\n # have any way of representing in the legend\n formatter.set_useOffset(False)\n formatter.set_scientific(False)\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 690, "name": "tick_values", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 690, "name": "astype", "kind": "ref", "category": "function", "info": " raw_levels = locator.tick_values(*limits).astype(dtype)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 695, "name": "dummy_axis", "kind": "def", "category": "class", "info": "get_view_interval"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 696, "name": "get_view_interval", "kind": "def", "category": "function", "info": " def get_view_interval(self):\n return limits\n\n if isinstance(locator, mpl.ticker.LogLocator):\n formatter = mpl.ticker.LogFormatter()\n else:\n formatter = mpl.ticker.ScalarFormatter()\n # Avoid having an offset/scientific notation which we don't currently\n # have any way of representing in the legend\n formatter.set_useOffset(False)\n formatter.set_scientific(False)\n formatter.axis = dummy_axis()\n\n # TODO: The following two lines should be replaced\n # once pinned matplotlib>=3.1.0 with:\n # formatted_levels = formatter.format_ticks(raw_levels)\n formatter.set_locs(raw_levels)\n formatted_levels = [formatter(x) for x in raw_levels]\n\n return raw_levels, formatted_levels\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 705, "name": "set_useOffset", "kind": "ref", "category": "function", "info": " formatter.set_useOffset(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 706, "name": "set_scientific", "kind": "ref", "category": "function", "info": " formatter.set_scientific(False)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 707, "name": "dummy_axis", "kind": "ref", "category": "function", "info": " formatter.axis = dummy_axis()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 712, "name": "set_locs", "kind": "ref", "category": "function", "info": " formatter.set_locs(raw_levels)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 713, "name": "formatter", "kind": "ref", "category": "function", "info": " formatted_levels = [formatter(x) for x in raw_levels]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 718, "name": "relative_luminance", "kind": "def", "category": 
"function", "info": "def relative_luminance(color):\n \"\"\"Calculate the relative luminance of a color according to W3C standards\n\n Parameters\n ----------\n color : matplotlib color or sequence of matplotlib colors\n Hex code, rgb-tuple, or html color name.\n\n Returns\n -------\n luminance : float(s) between 0 and 1\n\n \"\"\"\n rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)\n lum = rgb.dot([.2126, .7152, .0722])\n try:\n return lum.item()\n except ValueError:\n return lum\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 731, "name": "to_rgba_array", "kind": "ref", "category": "function", "info": " rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 735, "name": "item", "kind": "ref", "category": "function", "info": " return lum.item()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 740, "name": "to_utf8", "kind": "def", "category": "function", "info": "def to_utf8(obj):\n \"\"\"Return a string representing a Python object.\n\n Strings (i.e. type ``str``) are returned unchanged.\n\n Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.\n\n For other objects, the method ``__str__()`` is called, and the result is\n returned as a string.\n\n Parameters\n ----------\n obj : object\n Any Python object\n\n Returns\n -------\n s : str\n UTF-8-decoded string representation of ``obj``\n\n \"\"\"\n if isinstance(obj, str):\n return obj\n try:\n return obj.decode(encoding=\"utf-8\")\n except AttributeError: # obj is not bytes-like\n return str(obj)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 764, "name": "decode", "kind": "ref", "category": "function", "info": " return obj.decode(encoding=\"utf-8\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 769, "name": "_normalize_kwargs", "kind": "def", "category": "function", "info": "def _normalize_kwargs(kws, artist):\n \"\"\"Wrapper for mpl.cbook.normalize_kwargs that supports <= 3.2.1.\"\"\"\n _alias_map = {\n 'color': ['c'],\n 'linewidth': ['lw'],\n 'linestyle': ['ls'],\n 'facecolor': ['fc'],\n 'edgecolor': ['ec'],\n 'markerfacecolor': ['mfc'],\n 'markeredgecolor': ['mec'],\n 'markeredgewidth': ['mew'],\n 'markersize': ['ms']\n }\n try:\n kws = normalize_kwargs(kws, artist)\n except AttributeError:\n kws = normalize_kwargs(kws, _alias_map)\n return kws\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 789, "name": "_check_argument", "kind": "def", "category": "function", "info": "def _check_argument(param, options, value, prefix=False):\n \"\"\"Raise if value for param is not in options.\"\"\"\n if prefix and value is not None:\n failure = not any(value.startswith(p) for p in options if isinstance(p, str))\n else:\n failure = value not in options\n if failure:\n raise ValueError(\n f\"The value for `{param}` must be one of {options}, \"\n f\"but {repr(value)} was passed.\"\n )\n return value\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 803, "name": "_assign_default_kwargs", "kind": "def", "category": "function", "info": "def _assign_default_kwargs(kws, call_func, source_func):\n \"\"\"Assign default kwargs for call_func using values from source_func.\"\"\"\n # This exists so that axes-level functions and figure-level functions can\n # both call a Plotter method while having the default kwargs be defined in\n # the signature of the axes-level function.\n # An alternative would be to have a decorator on the method that sets its\n # defaults based on those defined in the axes-level function.\n # Then the figure-level function would not need to worry about defaults.\n # I am not sure which is better.\n needed = inspect.signature(call_func).parameters\n defaults = inspect.signature(source_func).parameters\n\n for param in needed:\n if param in defaults and param not in kws:\n kws[param] = defaults[param].default\n\n return kws\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 822, "name": "adjust_legend_subtitles", "kind": "def", "category": "function", "info": "def adjust_legend_subtitles(legend):\n \"\"\"\n Make invisible-handle \"subtitles\" entries look more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n \"\"\"\n # Legend title not in rcParams until 3.0\n font_size = plt.rcParams.get(\"legend.title_fontsize\", None)\n hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n for hpack in hpackers:\n draw_area, text_area = hpack.get_children()\n handles = draw_area.get_children()\n if not all(artist.get_visible() for artist in handles):\n draw_area.set_width(0)\n for text in text_area.get_children():\n if font_size is not None:\n text.set_size(font_size)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 831, "name": "get_children", "kind": "ref", "category": "function", "info": " hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 833, "name": "get_children", "kind": "ref", "category": "function", "info": " draw_area, text_area = hpack.get_children()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 834, "name": "get_children", "kind": "ref", "category": "function", "info": " handles = draw_area.get_children()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 835, "name": "get_visible", "kind": "ref", "category": "function", "info": " if not all(artist.get_visible() for artist in handles):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 836, "name": "set_width", "kind": "ref", "category": "function", "info": " draw_area.set_width(0)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 837, "name": "get_children", "kind": "ref", "category": "function", "info": " for text in text_area.get_children():\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": 
"seaborn/utils.py", "line": 839, "name": "set_size", "kind": "ref", "category": "function", "info": " text.set_size(font_size)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 842, "name": "_deprecate_ci", "kind": "def", "category": "function", "info": "def _deprecate_ci(errorbar, ci):\n \"\"\"\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n \"\"\"\n if ci is not deprecated and ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n msg = (\n \"\\n\\nThe `ci` parameter is deprecated. \"\n f\"Use `errorbar={repr(errorbar)}` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return errorbar\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 867, "name": "_get_transform_functions", "kind": "def", "category": "function", "info": "def _get_transform_functions(ax, axis):\n \"\"\"Return the forward and inverse transforms for a given axis.\"\"\"\n axis_obj = getattr(ax, f\"{axis}axis\")\n transform = axis_obj.get_transform()\n return transform.transform, transform.inverted().transform\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 870, "name": "get_transform", "kind": "ref", "category": "function", "info": " transform = axis_obj.get_transform()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 871, "name": "inverted", "kind": "ref", "category": "function", "info": " return transform.transform, transform.inverted().transform\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 875, "name": "_disable_autolayout", "kind": "def", "category": "function", "info": "def _disable_autolayout():\n \"\"\"Context manager for preventing rc-controlled auto-layout behavior.\"\"\"\n # This is a workaround for an issue in matplotlib, for details see\n # https://github.com/mwaskom/seaborn/issues/2914\n # The only affect of this rcParam is to set the default value for\n # layout= in plt.figure, so we could just do that instead.\n # But then we would need to own the complexity of the transition\n # from tight_layout=True -> layout=\"tight\". 
This seems easier,\n # but can be removed when (if) that is simpler on the matplotlib side,\n # or if the layout algorithms are improved to handle figure legends.\n orig_val = mpl.rcParams[\"figure.autolayout\"]\n try:\n mpl.rcParams[\"figure.autolayout\"] = False\n yield\n finally:\n mpl.rcParams[\"figure.autolayout\"] = orig_val\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 893, "name": "_version_predates", "kind": "def", "category": "function", "info": "def _version_predates(lib: ModuleType, version: str) -> bool:\n \"\"\"Helper function for checking version compatibility.\"\"\"\n return Version(lib.__version__) < Version(version)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 895, "name": "Version", "kind": "ref", "category": "function", "info": " return Version(lib.__version__) < Version(version)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/utils.py", "rel_fname": "seaborn/utils.py", "line": 895, "name": "Version", "kind": "ref", "category": "function", "info": " return Version(lib.__version__) < Version(version)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 7, "name": "interact", "kind": "def", "category": "function", "info": " def interact(f):\n msg = \"Interactive palettes require `ipywidgets`, which is not installed.\"\n raise ImportError(msg)\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 21, "name": "_init_mutable_colormap", "kind": "def", "category": "function", "info": "def _init_mutable_colormap():\n \"\"\"Create a matplotlib colormap that will be updated by the widgets.\"\"\"\n greys = color_palette(\"Greys\", 256)\n cmap = LinearSegmentedColormap.from_list(\"interactive\", greys)\n cmap._init()\n cmap._set_extremes()\n return cmap\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 23, "name": "color_palette", "kind": "ref", "category": "function", "info": " greys = color_palette(\"Greys\", 256)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 30, "name": "_update_lut", "kind": "def", "category": "function", "info": "def _update_lut(cmap, colors):\n \"\"\"Change the LUT values in a matplotlib colormap in-place.\"\"\"\n cmap._lut[:256] = colors\n cmap._set_extremes()\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 36, "name": "_show_cmap", "kind": "def", "category": "function", "info": "def _show_cmap(cmap):\n \"\"\"Show a continuous matplotlib colormap.\"\"\"\n from .rcmod import axes_style # Avoid circular import\n with axes_style(\"white\"):\n f, ax = plt.subplots(figsize=(8.25, .75))\n ax.set(xticks=[], yticks=[])\n x = np.linspace(0, 1, 256)[np.newaxis, :]\n ax.pcolormesh(x, cmap=cmap)\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 39, "name": "axes_style", "kind": "ref", "category": "function", "info": " with axes_style(\"white\"):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", 
"rel_fname": "seaborn/widgets.py", "line": 46, "name": "choose_colorbrewer_palette", "kind": "def", "category": "function", "info": "def choose_colorbrewer_palette(data_type, as_cmap=False):\n \"\"\"Select a palette from the ColorBrewer set.\n\n These palettes are built into matplotlib and can be used by name in\n many seaborn functions, or by passing the object returned by this function.\n\n Parameters\n ----------\n data_type : {'sequential', 'diverging', 'qualitative'}\n This describes the kind of data you want to visualize. See the seaborn\n color palette docs for more information about how to choose this value.\n Note that you can pass substrings (e.g. 'q' for 'qualitative.\n\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n diverging_palette : Create a diverging palette from selected colors.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n\n \"\"\"\n if data_type.startswith(\"q\") and as_cmap:\n raise ValueError(\"Qualitative palettes cannot be colormaps.\")\n\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if data_type.startswith(\"s\"):\n opts = [\"Greys\", \"Reds\", \"Greens\", \"Blues\", \"Oranges\", \"Purples\",\n \"BuGn\", \"BuPu\", \"GnBu\", \"OrRd\", \"PuBu\", \"PuRd\", \"RdPu\", \"YlGn\",\n \"PuBuGn\", \"YlGnBu\", \"YlOrBr\", \"YlOrRd\"]\n variants = [\"regular\", \"reverse\", \"dark\"]\n\n @interact\n def choose_sequential(name=opts, n=(2, 18),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 83, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 92, "name": "choose_sequential", "kind": "def", "category": "function", "info": " def choose_sequential(name=opts, n=(2, 18),\n 
desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n elif variant == \"dark\":\n name += \"_d\"\n\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"d\"):\n opts = [\"RdBu\", \"RdGy\", \"PRGn\", \"PiYG\", \"BrBG\",\n \"RdYlBu\", \"RdYlGn\", \"Spectral\"]\n variants = [\"regular\", \"reverse\"]\n\n @interact\n def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 93, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 101, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 102, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 103, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 105, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 106, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 114, "name": "choose_diverging", "kind": "def", "category": "function", "info": " def choose_diverging(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1),\n variant=variants):\n if variant == \"reverse\":\n name += \"_r\"\n if as_cmap:\n colors = color_palette(name, 256, desat)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n elif data_type.startswith(\"q\"):\n opts = [\"Set1\", \"Set2\", \"Set3\", \"Paired\", \"Accent\",\n \"Pastel1\", \"Pastel2\", \"Dark2\"]\n\n @interact\n def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, 
desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 115, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 120, "name": "color_palette", "kind": "ref", "category": "function", "info": " colors = color_palette(name, 256, desat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 121, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 122, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 124, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 125, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 132, "name": "choose_qualitative", "kind": "def", "category": "function", "info": " def choose_qualitative(name=opts, n=(2, 16),\n desat=FloatSlider(min=0, max=1, value=1)):\n pal[:] = color_palette(name, n, desat)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 133, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " desat=FloatSlider(min=0, max=1, value=1)):\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 134, "name": "color_palette", "kind": "ref", "category": "function", "info": " pal[:] = color_palette(name, n, desat)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 135, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 142, "name": "choose_dark_palette", "kind": "def", "category": "function", "info": "def choose_dark_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a dark sequential palette.\n\n This corresponds with the :func:`dark_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`dark_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n dark_palette : Create a sequential palette with dark low values.\n light_palette : Create a sequential palette with bright low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 175, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 179, "name": "choose_dark_palette_rgb", "kind": "def", "category": "function", "info": " def choose_dark_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = dark_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 185, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"rgb\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": 
"seaborn/widgets.py", "line": 186, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 187, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 189, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 190, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 194, "name": "choose_dark_palette_hls", "kind": "def", "category": "function", "info": " def choose_dark_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = dark_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 200, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 201, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 202, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 204, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 205, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 209, "name": "choose_dark_palette_husl", "kind": "def", "category": "function", "info": " def choose_dark_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = dark_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = dark_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 215, "name": "dark_palette", "kind": "ref", "category": "function", "info": " colors = dark_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 216, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 217, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 219, "name": "dark_palette", "kind": "ref", "category": "function", "info": " pal[:] = dark_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 220, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 227, "name": "choose_light_palette", "kind": "def", "category": "function", "info": "def choose_light_palette(input=\"husl\", as_cmap=False):\n \"\"\"Launch an interactive widget to create a light sequential palette.\n\n This corresponds with the :func:`light_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n input : {'husl', 'hls', 'rgb'}\n Color space for defining the seed value. 
Note that the default is\n different than the default input for :func:`light_palette`.\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n light_palette : Create a sequential palette with bright low values.\n dark_palette : Create a sequential palette with dark low values.\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n if input == \"rgb\":\n @interact\n def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 260, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 264, "name": "choose_light_palette_rgb", "kind": "def", "category": "function", "info": " def choose_light_palette_rgb(r=(0., 1.),\n g=(0., 1.),\n b=(0., 1.),\n n=(3, 17)):\n color = r, g, b\n if as_cmap:\n colors = light_palette(color, 256, input=\"rgb\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"rgb\")\n palplot(pal)\n\n elif input == \"hls\":\n @interact\n def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 270, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"rgb\")\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 271, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 272, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 274, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"rgb\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 275, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 279, "name": "choose_light_palette_hls", "kind": "def", "category": "function", "info": " def choose_light_palette_hls(h=(0., 1.),\n l=(0., 1.), # noqa: E741\n s=(0., 1.),\n n=(3, 17)):\n color = h, l, s\n if as_cmap:\n colors = light_palette(color, 256, input=\"hls\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"hls\")\n palplot(pal)\n\n elif input == \"husl\":\n @interact\n def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 285, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"hls\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 286, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 287, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 289, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"hls\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 290, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 294, "name": "choose_light_palette_husl", "kind": "def", "category": "function", "info": " def choose_light_palette_husl(h=(0, 359),\n s=(0, 99),\n l=(0, 99), # noqa: E741\n n=(3, 17)):\n color = h, s, l\n if as_cmap:\n colors = light_palette(color, 256, input=\"husl\")\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = 
light_palette(color, n, input=\"husl\")\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 300, "name": "light_palette", "kind": "ref", "category": "function", "info": " colors = light_palette(color, 256, input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 301, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 302, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 304, "name": "light_palette", "kind": "ref", "category": "function", "info": " pal[:] = light_palette(color, n, input=\"husl\")\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 305, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 312, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": "def choose_diverging_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to choose a diverging color palette.\n\n This corresponds with the :func:`diverging_palette` function. This kind\n of palette is good for data that range between interesting low values\n and interesting high values with a meaningful midpoint. 
(For example,\n change scores relative to some baseline value).\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n diverging_palette : Create a diverging color palette or colormap.\n choose_colorbrewer_palette : Interactively choose palettes from the\n colorbrewer set, including diverging palettes.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 342, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 345, "name": "choose_diverging_palette", "kind": "def", "category": "function", "info": " def choose_diverging_palette(\n h_neg=IntSlider(min=0,\n max=359,\n value=220),\n h_pos=IntSlider(min=0,\n max=359,\n value=10),\n s=IntSlider(min=0, max=99, value=74),\n l=IntSlider(min=0, max=99, value=50), # noqa: E741\n sep=IntSlider(min=1, max=50, value=10),\n n=(2, 16),\n center=[\"light\", \"dark\"]\n ):\n if as_cmap:\n colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n _update_lut(cmap, colors)\n _show_cmap(cmap)\n else:\n pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n\n\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 346, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_neg=IntSlider(min=0,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 349, "name": "IntSlider", "kind": "ref", "category": "function", "info": " h_pos=IntSlider(min=0,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 352, "name": "IntSlider", "kind": "ref", "category": "function", "info": " s=IntSlider(min=0, max=99, value=74),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 353, "name": "IntSlider", "kind": "ref", "category": "function", "info": " l=IntSlider(min=0, max=99, value=50), # noqa: E741\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 354, "name": "IntSlider", "kind": "ref", "category": "function", "info": " sep=IntSlider(min=1, max=50, value=10),\n"}, 
{"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 359, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " colors = diverging_palette(h_neg, h_pos, s, l, sep, 256, center)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 360, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, colors)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 361, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 363, "name": "diverging_palette", "kind": "ref", "category": "function", "info": " pal[:] = diverging_palette(h_neg, h_pos, s, l, sep, n, center)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 364, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 371, "name": "choose_cubehelix_palette", "kind": "def", "category": "function", "info": "def choose_cubehelix_palette(as_cmap=False):\n \"\"\"Launch an interactive widget to create a sequential cubehelix palette.\n\n This corresponds with the :func:`cubehelix_palette` function. This kind\n of palette is good for data that range between relatively uninteresting\n low values and interesting high values. 
The cubehelix system allows the\n palette to have more hue variance across the range, which can be helpful\n for distinguishing a wider range of values.\n\n Requires IPython 2+ and must be used in the notebook.\n\n Parameters\n ----------\n as_cmap : bool\n If True, the return value is a matplotlib colormap rather than a\n list of discrete colors.\n\n Returns\n -------\n pal or cmap : list of colors or matplotlib colormap\n Object that can be passed to plotting functions.\n\n See Also\n --------\n cubehelix_palette : Create a sequential palette or colormap using the\n cubehelix system.\n\n \"\"\"\n pal = []\n if as_cmap:\n cmap = _init_mutable_colormap()\n\n @interact\n def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 401, "name": "_init_mutable_colormap", "kind": "ref", "category": "function", "info": " cmap = _init_mutable_colormap()\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 404, "name": "choose_cubehelix", "kind": "def", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n start=FloatSlider(min=0, max=3, value=0),\n rot=FloatSlider(min=-1, max=1, value=.4),\n gamma=FloatSlider(min=0, max=5, value=1),\n hue=FloatSlider(min=0, max=1, value=.8),\n light=FloatSlider(min=0, max=1, value=.85),\n dark=FloatSlider(min=0, max=1, value=.15),\n reverse=False):\n\n if as_cmap:\n colors = cubehelix_palette(256, start, rot, gamma,\n hue, light, dark, reverse)\n _update_lut(cmap, np.c_[colors, np.ones(256)])\n _show_cmap(cmap)\n else:\n pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n hue, light, dark, reverse)\n palplot(pal)\n\n if as_cmap:\n return cmap\n return pal\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 404, "name": "IntSlider", "kind": "ref", "category": "function", "info": " def choose_cubehelix(n_colors=IntSlider(min=2, max=16, value=9),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 405, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " start=FloatSlider(min=0, max=3, value=0),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 406, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " rot=FloatSlider(min=-1, max=1, value=.4),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 407, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " gamma=FloatSlider(min=0, max=5, value=1),\n"}, {"fname": 
"playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 408, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " hue=FloatSlider(min=0, max=1, value=.8),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 409, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " light=FloatSlider(min=0, max=1, value=.85),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 410, "name": "FloatSlider", "kind": "ref", "category": "function", "info": " dark=FloatSlider(min=0, max=1, value=.15),\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 414, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " colors = cubehelix_palette(256, start, rot, gamma,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 416, "name": "_update_lut", "kind": "ref", "category": "function", "info": " _update_lut(cmap, np.c_[colors, np.ones(256)])\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 417, "name": "_show_cmap", "kind": "ref", "category": "function", "info": " _show_cmap(cmap)\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 419, "name": "cubehelix_palette", "kind": "ref", "category": "function", "info": " pal[:] = cubehelix_palette(n_colors, start, rot, gamma,\n"}, {"fname": "playground/b07b095a-d4cc-4a90-9e1b-d5bc50b354c8/seaborn/seaborn/widgets.py", "rel_fname": "seaborn/widgets.py", "line": 421, "name": "palplot", "kind": "ref", "category": "function", "info": " palplot(pal)\n"}] \ No newline at end of file diff --git a/tags_pallets__flask-4045.json b/tags_pallets__flask-4045.json new file mode 100644 index 0000000000000000000000000000000000000000..ee02f2ad5cae303708462d62db6f18d5dbe8752d --- /dev/null +++ b/tags_pallets__flask-4045.json @@ -0,0 +1 @@ +[{"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 9, "name": "get_version", "kind": "ref", "category": "function", "info": "release, version = get_version(\"Flask\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 41, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"Donate\", \"https://palletsprojects.com/donate\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 42, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"PyPI Releases\", \"https://pypi.org/project/Flask/\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 43, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"Source Code\", \"https://github.com/pallets/flask/\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 44, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"Issue Tracker\", 
\"https://github.com/pallets/flask/issues/\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 45, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"Website\", \"https://palletsprojects.com/p/flask/\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 46, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"Twitter\", \"https://twitter.com/PalletsTeam\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 47, "name": "ProjectLink", "kind": "ref", "category": "function", "info": " ProjectLink(\"Chat\", \"https://discord.gg/pallets\"),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 68, "name": "github_link", "kind": "def", "category": "function", "info": "def github_link(name, rawtext, text, lineno, inliner, options=None, content=None):\n app = inliner.document.settings.env.app\n release = app.config.release\n base_url = \"https://github.com/pallets/flask/tree/\"\n\n if text.endswith(\">\"):\n words, text = text[:-1].rsplit(\"<\", 1)\n words = words.strip()\n else:\n words = None\n\n if packaging.version.parse(release).is_devrelease:\n url = f\"{base_url}main/{text}\"\n else:\n url = f\"{base_url}{release}/{text}\"\n\n if words is None:\n words = url\n\n from docutils.nodes import reference\n from docutils.parsers.rst.roles import set_classes\n\n options = options or {}\n set_classes(options)\n node = reference(rawtext, words, refuri=url, **options)\n return [node], []\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 91, "name": "set_classes", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 92, "name": "reference", "kind": "ref", "category": "function", "info": " node = reference(rawtext, words, refuri=url, **options)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 96, "name": "setup", "kind": "def", "category": "function", "info": "def setup(app):\n app.add_role(\"gh\", github_link)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/docs/conf.py", "rel_fname": "docs/conf.py", "line": 97, "name": "add_role", "kind": "ref", "category": "function", "info": " app.add_role(\"gh\", github_link)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/javascript/js_example/__init__.py", "rel_fname": "examples/javascript/js_example/__init__.py", "line": 2, "name": "Flask", "kind": "ref", "category": "function", "info": "app = Flask(__name__)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/javascript/js_example/views.py", "rel_fname": "examples/javascript/js_example/views.py", "line": 7, "name": "route", "kind": "ref", "category": "function", "info": "@app.route(\"/\", defaults={\"js\": \"plain\"})\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/javascript/js_example/views.py", "rel_fname": "examples/javascript/js_example/views.py", "line": 8, "name": "route", "kind": "ref", "category": "function", "info": "@app.route(\"/\")\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/javascript/js_example/views.py", "rel_fname": "examples/javascript/js_example/views.py", "line": 10, "name": "render_template", "kind": "ref", "category": "function", "info": " return render_template(f\"{js}.html\", js=js)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/javascript/js_example/views.py", "rel_fname": "examples/javascript/js_example/views.py", "line": 13, "name": "route", "kind": "ref", "category": "function", "info": "@app.route(\"/add\", methods=[\"POST\"])\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/javascript/js_example/views.py", "rel_fname": "examples/javascript/js_example/views.py", "line": 17, "name": "jsonify", "kind": "ref", "category": "function", "info": " return jsonify(result=a + b)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 5, "name": "create_app", "kind": "def", "category": "function", "info": "def create_app(test_config=None):\n \"\"\"Create and configure an instance of the Flask application.\"\"\"\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n # a default secret that should be overridden by instance config\n SECRET_KEY=\"dev\",\n # store the database in the instance folder\n DATABASE=os.path.join(app.instance_path, \"flaskr.sqlite\"),\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile(\"config.py\", silent=True)\n else:\n # load the test config if passed in\n app.config.update(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n @app.route(\"/hello\")\n def hello():\n return \"Hello, World!\"\n\n # register the database commands\n from flaskr import db\n\n db.init_app(app)\n\n # apply the blueprints to the app\n from flaskr import auth, blog\n\n app.register_blueprint(auth.bp)\n app.register_blueprint(blog.bp)\n\n # make url_for('index') == url_for('blog.index')\n # in another app, you might define a separate main index here with\n # app.route, while giving the blog blueprint a url_prefix, but for\n # the tutorial the blog will be the main index\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 7, "name": "Flask", "kind": "ref", "category": "function", "info": " app = Flask(__name__, instance_relative_config=True)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 8, "name": "from_mapping", "kind": "ref", "category": "function", "info": " app.config.from_mapping(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 17, "name": "from_pyfile", "kind": "ref", "category": "function", "info": " app.config.from_pyfile(\"config.py\", silent=True)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 28, "name": "route", "kind": "ref", "category": "function", "info": " @app.route(\"/hello\")\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 29, "name": "hello", "kind": "def", "category": "function", "info": " def hello():\n return \"Hello, World!\"\n\n # register the database commands\n from flaskr import db\n\n db.init_app(app)\n\n # apply the blueprints to the app\n from flaskr import auth, blog\n\n app.register_blueprint(auth.bp)\n app.register_blueprint(blog.bp)\n\n # make url_for('index') == url_for('blog.index')\n # in another app, you might define a separate main index here with\n # app.route, while giving the blog blueprint a url_prefix, but for\n # the tutorial the blog will be the main index\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 35, "name": "init_app", "kind": "ref", "category": "function", "info": " db.init_app(app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 40, "name": "register_blueprint", "kind": "ref", "category": "function", "info": " app.register_blueprint(auth.bp)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 41, "name": "register_blueprint", "kind": "ref", "category": "function", "info": " app.register_blueprint(blog.bp)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/__init__.py", "rel_fname": "examples/tutorial/flaskr/__init__.py", "line": 47, "name": "add_url_rule", "kind": "ref", "category": "function", "info": " app.add_url_rule(\"/\", endpoint=\"index\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 15, "name": "Blueprint", "kind": "ref", "category": "function", "info": "bp = Blueprint(\"auth\", __name__, url_prefix=\"/auth\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 18, "name": "login_required", "kind": "def", "category": "function", "info": "def login_required(view):\n \"\"\"View decorator that redirects anonymous users to the login page.\"\"\"\n\n @functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 22, "name": "wrapped_view", "kind": "def", "category": "function", "info": " def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for(\"auth.login\"))\n\n return view(**kwargs)\n\n return wrapped_view\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 24, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"auth.login\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 24, 
"name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"auth.login\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 26, "name": "view", "kind": "ref", "category": "function", "info": " return view(**kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 32, "name": "load_logged_in_user", "kind": "def", "category": "function", "info": "def load_logged_in_user():\n \"\"\"If a user id is stored in the session, load the user object from\n the database into ``g.user``.\"\"\"\n user_id = session.get(\"user_id\")\n\n if user_id is None:\n g.user = None\n else:\n g.user = (\n get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n )\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 41, "name": "get_db", "kind": "ref", "category": "function", "info": " get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 41, "name": "execute", "kind": "ref", "category": "function", "info": " get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 41, "name": "fetchone", "kind": "ref", "category": "function", "info": " get_db().execute(\"SELECT * FROM user WHERE id = ?\", (user_id,)).fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 45, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"/register\", methods=(\"GET\", \"POST\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 46, "name": "register", "kind": "def", "category": "function", "info": "def register():\n \"\"\"Register a new user.\n\n Validates that the username is not already taken. 
Hashes the\n password for security.\n \"\"\"\n if request.method == \"POST\":\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n db = get_db()\n error = None\n\n if not username:\n error = \"Username is required.\"\n elif not password:\n error = \"Password is required.\"\n elif (\n db.execute(\"SELECT id FROM user WHERE username = ?\", (username,)).fetchone()\n is not None\n ):\n error = f\"User {username} is already registered.\"\n\n if error is None:\n # the name is available, store it in the database and go to\n # the login page\n db.execute(\n \"INSERT INTO user (username, password) VALUES (?, ?)\",\n (username, generate_password_hash(password)),\n )\n db.commit()\n return redirect(url_for(\"auth.login\"))\n\n flash(error)\n\n return render_template(\"auth/register.html\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 55, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 63, "name": "execute", "kind": "ref", "category": "function", "info": " db.execute(\"SELECT id FROM user WHERE username = ?\", (username,)).fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 63, "name": "fetchone", "kind": "ref", "category": "function", "info": " db.execute(\"SELECT id FROM user WHERE username = ?\", (username,)).fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 71, "name": "execute", "kind": "ref", "category": "function", "info": " db.execute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 75, "name": "commit", "kind": "ref", "category": "function", "info": " db.commit()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 76, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"auth.login\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 76, "name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"auth.login\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 78, "name": "flash", "kind": "ref", "category": "function", "info": " flash(error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 80, "name": "render_template", "kind": "ref", "category": "function", "info": " return render_template(\"auth/register.html\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 83, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"/login\", methods=(\"GET\", 
\"POST\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 84, "name": "login", "kind": "def", "category": "function", "info": "def login():\n \"\"\"Log in a registered user by adding the user id to the session.\"\"\"\n if request.method == \"POST\":\n username = request.form[\"username\"]\n password = request.form[\"password\"]\n db = get_db()\n error = None\n user = db.execute(\n \"SELECT * FROM user WHERE username = ?\", (username,)\n ).fetchone()\n\n if user is None:\n error = \"Incorrect username.\"\n elif not check_password_hash(user[\"password\"], password):\n error = \"Incorrect password.\"\n\n if error is None:\n # store the user id in a new session and return to the index\n session.clear()\n session[\"user_id\"] = user[\"id\"]\n return redirect(url_for(\"index\"))\n\n flash(error)\n\n return render_template(\"auth/login.html\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 89, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 91, "name": "execute", "kind": "ref", "category": "function", "info": " user = db.execute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 93, "name": "fetchone", "kind": "ref", "category": "function", "info": " ).fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 104, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 104, "name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 106, "name": "flash", "kind": "ref", "category": "function", "info": " flash(error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 108, "name": "render_template", "kind": "ref", "category": "function", "info": " return render_template(\"auth/login.html\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 111, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"/logout\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 112, "name": "logout", "kind": "def", "category": "function", "info": "def logout():\n \"\"\"Clear the current session, including the stored user id.\"\"\"\n session.clear()\n return redirect(url_for(\"index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": 
"examples/tutorial/flaskr/auth.py", "line": 115, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/auth.py", "rel_fname": "examples/tutorial/flaskr/auth.py", "line": 115, "name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 12, "name": "Blueprint", "kind": "ref", "category": "function", "info": "bp = Blueprint(\"blog\", __name__)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 15, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"/\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 18, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 19, "name": "execute", "kind": "ref", "category": "function", "info": " posts = db.execute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 23, "name": "fetchall", "kind": "ref", "category": "function", "info": " ).fetchall()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 24, "name": "render_template", "kind": "ref", "category": "function", "info": " return render_template(\"blog/index.html\", posts=posts)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 27, "name": "get_post", "kind": "def", "category": "function", "info": "def get_post(id, check_author=True):\n \"\"\"Get a post and its author by id.\n\n Checks that the id exists and optionally that the current user is\n the author.\n\n :param id: id of post to get\n :param check_author: require the current user to be the author\n :return: the post with author information\n :raise 404: if a post with the given id doesn't exist\n :raise 403: if the current user isn't the author\n \"\"\"\n post = (\n get_db()\n .execute(\n \"SELECT p.id, title, body, created, author_id, username\"\n \" FROM post p JOIN user u ON p.author_id = u.id\"\n \" WHERE p.id = ?\",\n (id,),\n )\n .fetchone()\n )\n\n if post is None:\n abort(404, f\"Post id {id} doesn't exist.\")\n\n if check_author and post[\"author_id\"] != g.user[\"id\"]:\n abort(403)\n\n return post\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 40, "name": "get_db", "kind": "ref", "category": "function", "info": " get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 41, "name": "execute", "kind": "ref", "category": "function", "info": " .execute(\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 47, "name": "fetchone", "kind": "ref", "category": "function", "info": " .fetchone()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 59, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"/create\", methods=(\"GET\", \"POST\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 61, "name": "create", "kind": "def", "category": "function", "info": "def create():\n \"\"\"Create a new post for the current user.\"\"\"\n if request.method == \"POST\":\n title = request.form[\"title\"]\n body = request.form[\"body\"]\n error = None\n\n if not title:\n error = \"Title is required.\"\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n \"INSERT INTO post (title, body, author_id) VALUES (?, ?, ?)\",\n (title, body, g.user[\"id\"]),\n )\n db.commit()\n return redirect(url_for(\"blog.index\"))\n\n return render_template(\"blog/create.html\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 72, "name": "flash", "kind": "ref", "category": "function", "info": " flash(error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 74, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 75, "name": "execute", "kind": "ref", "category": "function", "info": " db.execute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 79, "name": "commit", "kind": "ref", "category": "function", "info": " db.commit()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 80, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 80, "name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 82, "name": "render_template", "kind": "ref", "category": "function", "info": " return render_template(\"blog/create.html\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 85, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"//update\", methods=(\"GET\", \"POST\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 89, "name": 
"get_post", "kind": "ref", "category": "function", "info": " post = get_post(id)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 100, "name": "flash", "kind": "ref", "category": "function", "info": " flash(error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 102, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 103, "name": "execute", "kind": "ref", "category": "function", "info": " db.execute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 106, "name": "commit", "kind": "ref", "category": "function", "info": " db.commit()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 107, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 107, "name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 109, "name": "render_template", "kind": "ref", "category": "function", "info": " return render_template(\"blog/update.html\", post=post)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 112, "name": "route", "kind": "ref", "category": "function", "info": "@bp.route(\"//delete\", methods=(\"POST\",))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 114, "name": "delete", "kind": "def", "category": "function", "info": "def delete(id):\n \"\"\"Delete a post.\n\n Ensures that the post exists and that the logged in user is the\n author of the post.\n \"\"\"\n get_post(id)\n db = get_db()\n db.execute(\"DELETE FROM post WHERE id = ?\", (id,))\n db.commit()\n return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 120, "name": "get_post", "kind": "ref", "category": "function", "info": " get_post(id)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 121, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 122, "name": "execute", "kind": "ref", "category": "function", "info": " db.execute(\"DELETE FROM post WHERE id = ?\", (id,))\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 123, "name": "commit", "kind": "ref", "category": "function", "info": " db.commit()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 124, "name": "redirect", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/blog.py", "rel_fname": "examples/tutorial/flaskr/blog.py", "line": 124, "name": "url_for", "kind": "ref", "category": "function", "info": " return redirect(url_for(\"blog.index\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 8, "name": "get_db", "kind": "def", "category": "function", "info": "def get_db():\n \"\"\"Connect to the application's configured database. The connection\n is unique for each request and will be reused if this is called\n again.\n \"\"\"\n if \"db\" not in g:\n g.db = sqlite3.connect(\n current_app.config[\"DATABASE\"], detect_types=sqlite3.PARSE_DECLTYPES\n )\n g.db.row_factory = sqlite3.Row\n\n return g.db\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 22, "name": "close_db", "kind": "def", "category": "function", "info": "def close_db(e=None):\n \"\"\"If this request connected to the database, close the\n connection.\n \"\"\"\n db = g.pop(\"db\", None)\n\n if db is not None:\n db.close()\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 29, "name": "close", "kind": "ref", "category": "function", "info": " db.close()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 32, "name": "init_db", "kind": "def", "category": "function", "info": "def init_db():\n \"\"\"Clear existing data and create new tables.\"\"\"\n db = get_db()\n\n with current_app.open_resource(\"schema.sql\") as f:\n db.executescript(f.read().decode(\"utf8\"))\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 34, "name": "get_db", "kind": "ref", "category": "function", "info": " db = get_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 36, "name": "open_resource", "kind": "ref", "category": "function", "info": " with current_app.open_resource(\"schema.sql\") as f:\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 37, "name": "executescript", "kind": "ref", "category": "function", "info": " db.executescript(f.read().decode(\"utf8\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 37, "name": "read", "kind": "ref", "category": "function", "info": " db.executescript(f.read().decode(\"utf8\"))\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 37, "name": "decode", "kind": "ref", "category": "function", "info": " db.executescript(f.read().decode(\"utf8\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 42, "name": "init_db_command", "kind": "def", "category": "function", "info": "def init_db_command():\n \"\"\"Clear existing data and create new tables.\"\"\"\n init_db()\n click.echo(\"Initialized the database.\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 44, "name": "init_db", "kind": "ref", "category": "function", "info": " init_db()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 48, "name": "init_app", "kind": "def", "category": "function", "info": "def init_app(app):\n \"\"\"Register database functions with the Flask app. This is called by\n the application factory.\n \"\"\"\n app.teardown_appcontext(close_db)\n app.cli.add_command(init_db_command)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 52, "name": "teardown_appcontext", "kind": "ref", "category": "function", "info": " app.teardown_appcontext(close_db)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/examples/tutorial/flaskr/db.py", "rel_fname": "examples/tutorial/flaskr/db.py", "line": 53, "name": "add_command", "kind": "ref", "category": "function", "info": " app.cli.add_command(init_db_command)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/__main__.py", "rel_fname": "src/flask/__main__.py", "line": 2, "name": "main", "kind": "ref", "category": "function", "info": "main()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 93, "name": "_make_timedelta", "kind": "def", "category": "function", "info": "def _make_timedelta(value: t.Optional[timedelta]) -> t.Optional[timedelta]:\n if value is None or isinstance(value, timedelta):\n return value\n\n return timedelta(seconds=value)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 100, "name": "Flask", "kind": "def", "category": "class", "info": 
"__init__\t_is_setup_finished\tname\tpropagate_exceptions\tpreserve_context_on_exception\tlogger\tjinja_env\tgot_first_request\tmake_config\tauto_find_instance_path\topen_instance_resource\ttemplates_auto_reload\ttemplates_auto_reload\tcreate_jinja_environment\tcreate_global_jinja_loader\tselect_jinja_autoescape\tupdate_template_context\tmake_shell_context\tdebug\tdebug\trun\ttest_client\ttest_cli_runner\tregister_blueprint\titer_blueprints\tadd_url_rule\ttemplate_filter\tadd_template_filter\ttemplate_test\tadd_template_test\ttemplate_global\tadd_template_global\tbefore_first_request\tteardown_appcontext\tshell_context_processor\t_find_error_handler\thandle_http_exception\ttrap_http_exception\thandle_user_exception\thandle_exception\tlog_exception\traise_routing_exception\tdispatch_request\tfull_dispatch_request\tfinalize_request\ttry_trigger_before_first_request_functions\tmake_default_options_response\tshould_ignore_error\tensure_sync\tasync_to_sync\tmake_response\tcreate_url_adapter\tinject_url_defaults\thandle_url_build_error\tpreprocess_request\tprocess_response\tdo_teardown_request\tdo_teardown_appcontext\tapp_context\trequest_context\ttest_request_context\twsgi_app\t__call__\t_request_blueprints"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 247, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " testing = ConfigAttribute(\"TESTING\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 255, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " secret_key = ConfigAttribute(\"SECRET_KEY\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 261, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " session_cookie_name = ConfigAttribute(\"SESSION_COOKIE_NAME\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 270, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " permanent_session_lifetime = ConfigAttribute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 284, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " send_file_max_age_default = ConfigAttribute(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 296, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " use_x_sendfile = ConfigAttribute(\"USE_X_SENDFILE\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 383, "name": "SecureCookieSessionInterface", "kind": "ref", "category": "function", "info": " session_interface = SecureCookieSessionInterface()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 407, "name": "auto_find_instance_path", "kind": "ref", "category": "function", "info": " instance_path = self.auto_find_instance_path()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 408, "name": "isabs", "kind": "ref", "category": "function", "info": " elif not 
os.path.isabs(instance_path):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 422, "name": "make_config", "kind": "ref", "category": "function", "info": " self.config = self.make_config(instance_relative_config)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 490, "name": "url_map_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 512, "name": "add_url_rule", "kind": "ref", "category": "function", "info": " self.add_url_rule(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 516, "name": "self_ref", "kind": "ref", "category": "function", "info": " view_func=lambda **kw: self_ref().send_static_file(**kw), # type: ignore # noqa: B950\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 516, "name": "send_static_file", "kind": "ref", "category": "function", "info": " view_func=lambda **kw: self_ref().send_static_file(**kw), # type: ignore # noqa: B950\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 523, "name": "_is_setup_finished", "kind": "def", "category": "function", "info": " def _is_setup_finished(self) -> bool:\n return self.debug and self._got_first_request\n\n @locked_cached_property\n def name(self) -> str: # type: ignore\n \"\"\"The name of the application. This is usually the import name\n with the difference that it's guessed from the run file if the\n import name is main. This name is used as a display name when\n Flask needs the name of the application. It can be set and overridden\n to change the value.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.import_name == \"__main__\":\n fn = getattr(sys.modules[\"__main__\"], \"__file__\", None)\n if fn is None:\n return \"__main__\"\n return os.path.splitext(os.path.basename(fn))[0]\n return self.import_name\n\n @property\n def propagate_exceptions(self) -> bool:\n \"\"\"Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration\n value in case it's set, otherwise a sensible default is returned.\n\n .. versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PROPAGATE_EXCEPTIONS\"]\n if rv is not None:\n return rv\n return self.testing or self.debug\n\n @property\n def preserve_context_on_exception(self) -> bool:\n \"\"\"Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``\n configuration value in case it's set, otherwise a sensible default\n is returned.\n\n .. versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PRESERVE_CONTEXT_ON_EXCEPTION\"]\n if rv is not None:\n return rv\n return self.debug\n\n @locked_cached_property\n def logger(self) -> logging.Logger:\n \"\"\"A standard Python :class:`~logging.Logger` for the app, with\n the same name as :attr:`name`.\n\n In debug mode, the logger's :attr:`~logging.Logger.level` will\n be set to :data:`~logging.DEBUG`.\n\n If there are no handlers configured, a default handler will be\n added. See :doc:`/logging` for more information.\n\n .. versionchanged:: 1.1.0\n The logger takes the same name as :attr:`name` rather than\n hard-coding ``\"flask.app\"``.\n\n .. versionchanged:: 1.0.0\n Behavior was simplified. The logger is always named\n ``\"flask.app\"``. 
The level is only set during configuration,\n it doesn't check ``app.debug`` each time. Only one format is\n used, not different ones depending on ``app.debug``. No\n handlers are removed, and a handler is only added if no\n handlers are already configured.\n\n .. versionadded:: 0.3\n \"\"\"\n return create_logger(self)\n\n @locked_cached_property\n def jinja_env(self) -> Environment:\n \"\"\"The Jinja environment used to load templates.\n\n The environment is created the first time this property is\n accessed. Changing :attr:`jinja_options` after that will have no\n effect.\n \"\"\"\n return self.create_jinja_environment()\n\n @property\n def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. 
versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. 
This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. 
versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. 
versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
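The short-circuit behavior of ``preprocess_request`` and the reversed ordering in ``process_response`` are easiest to see with the decorators that feed them. A sketch, with a made-up ``X-Token`` check::

    from flask import Flask, request

    app = Flask(__name__)

    @app.before_request
    def require_token():
        # returning a non-None value stops dispatch; the view never runs
        if request.headers.get("X-Token") != "secret":   # "secret" is an assumed value
            return "forbidden", 403

    @app.after_request
    def set_security_header(response):
        # after_request functions run in reverse registration order and
        # must return the (possibly replaced) response object
        response.headers["X-Frame-Options"] = "DENY"
        return response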
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
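A common use of ``do_teardown_appcontext``'s callback hook is releasing a resource stored on ``g``; a minimal sketch, where the ``db`` attribute and its ``close()`` method are assumptions::

    from flask import Flask, g

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_db(exc):
        # exc is the unhandled exception, or None on a clean pop;
        # return values of teardown functions are ignored
        db = g.pop("db", None)
        if db is not None:
            db.close()

    with app.app_context():
        pass    # close_db(None) runs when the block exits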
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 527, "name": "name", "kind": "def", "category": "function", "info": " def name(self) -> str: # type: ignore\n \"\"\"The name of the application. This is usually the import name\n with the difference that it's guessed from the run file if the\n import name is main. This name is used as a display name when\n Flask needs the name of the application. It can be set and overridden\n to change the value.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.import_name == \"__main__\":\n fn = getattr(sys.modules[\"__main__\"], \"__file__\", None)\n if fn is None:\n return \"__main__\"\n return os.path.splitext(os.path.basename(fn))[0]\n return self.import_name\n\n @property\n def propagate_exceptions(self) -> bool:\n \"\"\"Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration\n value in case it's set, otherwise a sensible default is returned.\n\n .. 
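To make the ``wsgi_app`` middleware advice above concrete, a sketch of the recommended wrapping style; ``HeaderMiddleware`` and its header are hypothetical::

    from flask import Flask

    app = Flask(__name__)

    class HeaderMiddleware:
        """Hypothetical WSGI middleware adding a header to every response."""

        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            def _start_response(status, headers, exc_info=None):
                headers.append(("X-Served-By", "HeaderMiddleware"))
                return start_response(status, headers, exc_info)

            return self.wsgi_app(environ, _start_response)

    # wrap wsgi_app, not the app object, so the app's methods stay reachable
    app.wsgi_app = HeaderMiddleware(app.wsgi_app)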
versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PROPAGATE_EXCEPTIONS\"]\n if rv is not None:\n return rv\n return self.testing or self.debug\n\n @property\n def preserve_context_on_exception(self) -> bool:\n \"\"\"Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``\n configuration value in case it's set, otherwise a sensible default\n is returned.\n\n .. versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PRESERVE_CONTEXT_ON_EXCEPTION\"]\n if rv is not None:\n return rv\n return self.debug\n\n @locked_cached_property\n def logger(self) -> logging.Logger:\n \"\"\"A standard Python :class:`~logging.Logger` for the app, with\n the same name as :attr:`name`.\n\n In debug mode, the logger's :attr:`~logging.Logger.level` will\n be set to :data:`~logging.DEBUG`.\n\n If there are no handlers configured, a default handler will be\n added. See :doc:`/logging` for more information.\n\n .. versionchanged:: 1.1.0\n The logger takes the same name as :attr:`name` rather than\n hard-coding ``\"flask.app\"``.\n\n .. versionchanged:: 1.0.0\n Behavior was simplified. The logger is always named\n ``\"flask.app\"``. The level is only set during configuration,\n it doesn't check ``app.debug`` each time. Only one format is\n used, not different ones depending on ``app.debug``. No\n handlers are removed, and a handler is only added if no\n handlers are already configured.\n\n .. versionadded:: 0.3\n \"\"\"\n return create_logger(self)\n\n @locked_cached_property\n def jinja_env(self) -> Environment:\n \"\"\"The Jinja environment used to load templates.\n\n The environment is created the first time this property is\n accessed. Changing :attr:`jinja_options` after that will have no\n effect.\n \"\"\"\n return self.create_jinja_environment()\n\n @property\n def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. 
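Tying together ``make_config``, ``auto_find_instance_path``, and instance-relative loading, a small sketch; ``local.cfg`` is a hypothetical file name and the instance folder is not created automatically::

    import os
    from flask import Flask

    app = Flask(__name__, instance_relative_config=True)
    os.makedirs(app.instance_path, exist_ok=True)       # ensure the folder exists
    app.config.from_pyfile("local.cfg", silent=True)    # silently skip if absent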
To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. 
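Because ``create_jinja_environment`` consults ``select_jinja_autoescape``, the autoescape policy can be extended by subclassing; a sketch under the assumption that ``*.jinja`` templates should also be escaped (the subclass name is made up)::

    from flask import Flask

    class XmlSafeFlask(Flask):
        def select_jinja_autoescape(self, filename):
            # also autoescape *.jinja templates, on top of the defaults
            if filename is not None and filename.endswith(".jinja"):
                return True
            return super().select_jinja_autoescape(filename)

    app = XmlSafeFlask(__name__)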
Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. 
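The "original values win" rule in ``update_template_context`` is visible with a context processor; ``title`` is an assumed variable name::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.context_processor
    def inject_defaults():
        return {"title": "default title"}

    with app.test_request_context():
        # values passed to the template win over context processors
        print(render_template_string("{{ title }}", title="explicit"))  # explicit
        print(render_template_string("{{ title }}"))                    # default title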
admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. 
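A minimal sketch of the ``run()`` fallback chain described above (explicit argument, then ``SERVER_NAME``, then ``127.0.0.1:5000``); the port is an arbitrary choice and this is for development only::

    from flask import Flask

    app = Flask(__name__)

    if __name__ == "__main__":
        app.run(debug=True, port=8080)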
For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. 
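Building on the ``test_client`` notes above, a small self-running sketch showing the ``testing`` flag and the preserved context inside the ``with`` block::

    from flask import Flask, request

    app = Flask(__name__)

    @app.route("/")
    def index():
        return "ok"

    def test_index():
        app.testing = True                       # propagate exceptions to the test
        with app.test_client() as client:
            rv = client.get("/?q=42")
            assert rv.status_code == 200
            assert request.args["q"] == "42"     # context still available here

    test_index()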
versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. 
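To illustrate ``register_blueprint`` and ``add_url_rule`` together, a sketch; the blueprint name and routes are made up::

    from flask import Blueprint, Flask

    app = Flask(__name__)
    bp = Blueprint("api", __name__)

    @bp.route("/ping")
    def ping():
        return {"pong": True}

    app.register_blueprint(bp, url_prefix="/api")    # served at /api/ping

    def hello():
        return "Hello"

    # equivalent to decorating hello with @app.route("/hello", methods=["GET", "POST"])
    app.add_url_rule("/hello", endpoint="hello", view_func=hello, methods=["GET", "POST"])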
Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
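The registration decorators above only attach the callables to the Jinja environment; a sketch of actually using a registered filter and test in a template (``shout`` and ``even`` are made-up names)::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.template_filter("shout")
    def shout(s):
        return s.upper() + "!"

    @app.template_test("even")
    def is_even(n):
        return n % 2 == 0

    with app.app_context():
        print(render_template_string("{{ 'hi'|shout }}"))   # HI!
        print(render_template_string("{{ 4 is even }}"))    # True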
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
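The lookup order in ``_find_error_handler`` (specific code before exception class, MRO within a class) means a catch-all can coexist with precise handlers; a sketch::

    from flask import Flask, jsonify
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        # the specific code wins over the class-based handler below
        return jsonify(error="not found"), 404

    @app.errorhandler(HTTPException)
    def any_http_error(e):
        # reached via MRO lookup for HTTP errors without a closer handler
        return jsonify(error=e.description), e.code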
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
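Since ``handle_exception`` always hands the handler an ``InternalServerError`` with the unhandled error attached, a 500 handler can log the original; a minimal sketch::

    from flask import Flask

    app = Flask(__name__)

    @app.errorhandler(500)
    def internal_error(e):
        # the unhandled exception is attached as e.original_exception
        # (it may be None for a plain abort(500))
        app.logger.error("unhandled error: %r", getattr(e, "original_exception", None))
        return "something went wrong", 500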
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
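The automatic ``OPTIONS`` branch in ``dispatch_request`` is observable from the test client; a sketch where the ``/hello`` route is made up (the ``Allow`` ordering may vary)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/hello", methods=["GET", "POST"])
    def hello():
        return "hi"

    with app.test_client() as client:
        rv = client.options("/hello")    # answered by make_default_options_response
        print(rv.headers["Allow"])       # e.g. GET, HEAD, OPTIONS, POST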
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
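``ensure_sync`` is what makes coroutine views usable under WSGI; a sketch assuming the ``async`` extra is installed (``pip install flask[async]``)::

    import asyncio
    from flask import Flask

    app = Flask(__name__)

    @app.route("/slow")
    async def slow():
        # wrapped via asgiref's async_to_sync before being called as a view
        await asyncio.sleep(0.1)
        return {"done": True}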
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 540, "name": "splitext", "kind": "ref", "category": "function", "info": " return os.path.splitext(os.path.basename(fn))[0]\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 540, "name": "basename", "kind": "ref", "category": "function", "info": " return os.path.splitext(os.path.basename(fn))[0]\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 544, "name": "propagate_exceptions", "kind": "def", "category": "function", "info": " def propagate_exceptions(self) -> bool:\n \"\"\"Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration\n value in case it's set, otherwise a sensible default is returned.\n\n .. versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PROPAGATE_EXCEPTIONS\"]\n if rv is not None:\n return rv\n return self.testing or self.debug\n\n @property\n def preserve_context_on_exception(self) -> bool:\n \"\"\"Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``\n configuration value in case it's set, otherwise a sensible default\n is returned.\n\n .. 
versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PRESERVE_CONTEXT_ON_EXCEPTION\"]\n if rv is not None:\n return rv\n return self.debug\n\n @locked_cached_property\n def logger(self) -> logging.Logger:\n \"\"\"A standard Python :class:`~logging.Logger` for the app, with\n the same name as :attr:`name`.\n\n In debug mode, the logger's :attr:`~logging.Logger.level` will\n be set to :data:`~logging.DEBUG`.\n\n If there are no handlers configured, a default handler will be\n added. See :doc:`/logging` for more information.\n\n .. versionchanged:: 1.1.0\n The logger takes the same name as :attr:`name` rather than\n hard-coding ``\"flask.app\"``.\n\n .. versionchanged:: 1.0.0\n Behavior was simplified. The logger is always named\n ``\"flask.app\"``. The level is only set during configuration,\n it doesn't check ``app.debug`` each time. Only one format is\n used, not different ones depending on ``app.debug``. No\n handlers are removed, and a handler is only added if no\n handlers are already configured.\n\n .. versionadded:: 0.3\n \"\"\"\n return create_logger(self)\n\n @locked_cached_property\n def jinja_env(self) -> Environment:\n \"\"\"The Jinja environment used to load templates.\n\n The environment is created the first time this property is\n accessed. Changing :attr:`jinja_options` after that will have no\n effect.\n \"\"\"\n return self.create_jinja_environment()\n\n @property\n def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. 
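A brief sketch of working with the instance folder described above; the folder is not created automatically, so the usual pattern makes it first (``app.cfg`` is a hypothetical file name), and unlike ``open_resource``, instance resources may be opened for writing::

    import os

    from flask import Flask

    # instance_relative_config=True makes make_config() resolve
    # relative config file names against instance_path
    app = Flask(__name__, instance_relative_config=True)
    os.makedirs(app.instance_path, exist_ok=True)

    with app.open_instance_resource("app.cfg", mode="wb") as f:
        f.write(b"SECRET_KEY = 'dev'\n")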
If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keep the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. 
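A sketch of the "original values win" rule implemented above, using a hypothetical ``title`` key: a value passed explicitly to the template survives ``update_template_context``, while the context processor fills the gap otherwise::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.context_processor
    def inject_title():
        return {"title": "from processor"}

    with app.test_request_context("/"):
        # an explicit keyword beats the processor's value
        assert render_template_string("{{ title }}", title="explicit") == "explicit"
        # without it, the processor's value is used
        assert render_template_string("{{ title }}") == "from processor"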
This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. 
Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. 
This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
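A short sketch of that fallback (hypothetical ``hello`` view): registering a plain function with ``add_url_rule`` is equivalent to decorating it with ``@app.route("/hello")``; the endpoint defaults to the function name and the methods default to ``GET`` with automatic ``HEAD``/``OPTIONS`` handling::

    from flask import Flask, url_for

    app = Flask(__name__)

    def hello():
        return "hello"

    # same effect as @app.route("/hello") on the function
    app.add_url_rule("/hello", view_func=hello)

    with app.test_request_context():
        assert url_for("hello") == "/hello"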
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
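A usage sketch for the ``original_exception`` attribute described above (hypothetical ``/boom`` route); the handler always receives the ``InternalServerError``, with the unhandled error attached::

    from flask import Flask
    from werkzeug.exceptions import InternalServerError

    app = Flask(__name__)

    @app.route("/boom")
    def boom():
        raise RuntimeError("boom")

    @app.errorhandler(InternalServerError)
    def on_500(e):
        # e.original_exception is the RuntimeError raised by the view
        return f"caught {type(e.original_exception).__name__}", 500

    with app.test_client() as client:
        assert client.get("/boom").get_data(as_text=True) == "caught RuntimeError"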
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
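A sketch of the automatic ``OPTIONS`` handling mentioned above (hypothetical ``/item`` route); since the rule does not list ``OPTIONS`` itself, ``provide_automatic_options`` is enabled and ``make_default_options_response`` fills the ``Allow`` header from the URL map::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/item", methods=["GET", "POST"])
    def item():
        return "item"

    with app.test_client() as client:
        rv = client.options("/item")
        assert "POST" in rv.allow and "GET" in rv.allow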
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
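A sketch of what ``ensure_sync`` enables, assuming Flask was installed with the ``async`` extra (``pip install flask[async]``); the coroutine view below is wrapped via ``asgiref`` so it can run inside a synchronous WSGI worker::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/async")
    async def async_view():
        # awaitable work would go here; the result is returned as usual
        return "from a coroutine"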
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
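A minimal sketch of ``preprocess_request``'s short-circuit behavior (see the full docstring earlier in this file), with a hypothetical ``require_token`` hook: a non-``None`` return from a ``before_request`` function becomes the response, and the view is never called::

    from flask import Flask

    app = Flask(__name__)

    @app.before_request
    def require_token():
        # returning a (body, status) tuple ends dispatch early
        return "forbidden", 403

    @app.route("/")
    def index():
        return "never reached"

    with app.test_client() as client:
        assert client.get("/").status_code == 403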
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 556, "name": "preserve_context_on_exception", "kind": "def", "category": "function", "info": " def preserve_context_on_exception(self) -> bool:\n \"\"\"Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION``\n configuration value in case it's set, otherwise a sensible default\n is returned.\n\n .. versionadded:: 0.7\n \"\"\"\n rv = self.config[\"PRESERVE_CONTEXT_ON_EXCEPTION\"]\n if rv is not None:\n return rv\n return self.debug\n\n @locked_cached_property\n def logger(self) -> logging.Logger:\n \"\"\"A standard Python :class:`~logging.Logger` for the app, with\n the same name as :attr:`name`.\n\n In debug mode, the logger's :attr:`~logging.Logger.level` will\n be set to :data:`~logging.DEBUG`.\n\n If there are no handlers configured, a default handler will be\n added. See :doc:`/logging` for more information.\n\n .. versionchanged:: 1.1.0\n The logger takes the same name as :attr:`name` rather than\n hard-coding ``\"flask.app\"``.\n\n .. versionchanged:: 1.0.0\n Behavior was simplified. The logger is always named\n ``\"flask.app\"``. The level is only set during configuration,\n it doesn't check ``app.debug`` each time. 
Only one format is\n used, not different ones depending on ``app.debug``. No\n handlers are removed, and a handler is only added if no\n handlers are already configured.\n\n .. versionadded:: 0.3\n \"\"\"\n return create_logger(self)\n\n @locked_cached_property\n def jinja_env(self) -> Environment:\n \"\"\"The Jinja environment used to load templates.\n\n The environment is created the first time this property is\n accessed. Changing :attr:`jinja_options` after that will have no\n effect.\n \"\"\"\n return self.create_jinja_environment()\n\n @property\n def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. 
versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader, keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns ``True``.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # add new variables in context processors more easily without\n # breaking existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. 
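# --- Illustrative sketch: ``make_shell_context`` (documented above) merges the
# return values of registered shell context processors into the default
# {"app": app, "g": g} mapping used by ``flask shell``. The ``db`` object here
# is a hypothetical stand-in.
from flask import Flask

app = Flask(__name__)
db = object()  # hypothetical stand-in for e.g. a database handle

@app.shell_context_processor
def extra_shell_context():
    # Merged into {"app": app, "g": g} by make_shell_context().
    return {"db": db}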
When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
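# --- Illustrative sketch: following the "Keep in Mind" admonition above, this
# enables just the interactive debugger without the reloader. ``use_reloader``
# is forwarded to werkzeug.serving.run_simple via ``**options``.
from flask import Flask

app = Flask(__name__)

if __name__ == "__main__":
    # Debugger on, reloader off; development only, never in production.
    app.run(debug=True, use_reloader=False)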
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as a response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization are done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
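# --- Illustrative sketch: the handler contract ``handle_exception`` describes
# above. A handler registered for ``InternalServerError`` always receives the
# 500 error, with the unhandled exception attached as ``original_exception``
# (``None`` when ``abort(500)`` was raised directly).
from flask import Flask
from werkzeug.exceptions import InternalServerError

app = Flask(__name__)

@app.errorhandler(InternalServerError)
def handle_500(e):
    # e.original_exception is the unhandled error, or None for abort(500).
    return {"error": "internal server error"}, 500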
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
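# --- Illustrative sketch: what ``ensure_sync``/``async_to_sync`` above make
# possible -- an ``async def`` view that Flask wraps so a WSGI worker can run
# it. Requires the ``flask[async]`` extra, which installs asgiref.
import asyncio

from flask import Flask

app = Flask(__name__)

@app.route("/slow")
async def slow():
    await asyncio.sleep(0.1)  # stand-in for real async I/O
    return {"status": "done"}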
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
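# --- Illustrative sketch: the view return types that ``make_response`` above
# accepts; each view exercises one branch of the conversion logic.
from flask import Flask

app = Flask(__name__)

@app.route("/text")
def text():
    return "hello"  # str body -> 200 response

@app.route("/json")
def as_json():
    return {"ok": True}  # dict -> passed through jsonify()

@app.route("/created")
def created():
    # (body, status, headers) three-tuple
    return {"id": 1}, 201, {"Location": "/things/1"}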
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
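# --- Illustrative sketch: the hooks driven by ``preprocess_request`` and
# ``process_response`` above. A ``before_request`` function that returns None
# lets dispatch continue; an ``after_request`` function must return the
# (possibly modified) response.
import time

from flask import Flask, g

app = Flask(__name__)

@app.before_request
def start_timer():
    g.start = time.perf_counter()  # returning None continues dispatch

@app.after_request
def add_timing_header(response):
    response.headers["X-Elapsed"] = f"{time.perf_counter() - g.start:.6f}"
    return response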
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
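# --- Illustrative sketch: the two context helpers documented above in use.
# ``init_db`` is a hypothetical function that needs ``current_app``.
from flask import Flask, current_app, request

app = Flask(__name__)

def init_db():
    print(f"initializing db for {current_app.name}")

with app.app_context():
    init_db()  # current_app now points at ``app``

with app.test_request_context("/report?year=2021"):
    assert request.args["year"] == "2021"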
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 569, "name": "logger", "kind": "def", "category": "function", "info": " def logger(self) -> logging.Logger:\n \"\"\"A standard Python :class:`~logging.Logger` for the app, with\n the same name as :attr:`name`.\n\n In debug mode, the logger's :attr:`~logging.Logger.level` will\n be set to :data:`~logging.DEBUG`.\n\n If there are no handlers configured, a default handler will be\n added. See :doc:`/logging` for more information.\n\n .. versionchanged:: 1.1.0\n The logger takes the same name as :attr:`name` rather than\n hard-coding ``\"flask.app\"``.\n\n .. versionchanged:: 1.0.0\n Behavior was simplified. The logger is always named\n ``\"flask.app\"``. The level is only set during configuration,\n it doesn't check ``app.debug`` each time. Only one format is\n used, not different ones depending on ``app.debug``. No\n handlers are removed, and a handler is only added if no\n handlers are already configured.\n\n .. 
versionadded:: 0.3\n \"\"\"\n return create_logger(self)\n\n @locked_cached_property\n def jinja_env(self) -> Environment:\n \"\"\"The Jinja environment used to load templates.\n\n The environment is created the first time this property is\n accessed. Changing :attr:`jinja_options` after that will have no\n effect.\n \"\"\"\n return self.create_jinja_environment()\n\n @property\n def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. 
versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader, keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns ``True``.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # add new variables in context processors more easily without\n # breaking existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. 
When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
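# --- Editor's note: hypothetical sketch (not part of the original source)
# showing the documented equivalence between route() and add_url_rule().
from flask import Flask

app = Flask(__name__)

def index():
    return "Hello"

# Equivalent to decorating ``index`` with @app.route("/"); when the endpoint
# is omitted it defaults to the function name via _endpoint_from_view_func.
app.add_url_rule("/", "index", index)

# Registering a *different* function under the same endpoint would raise the
# AssertionError guarded against in add_url_rule above.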
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
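# --- Editor's note: hypothetical sketch (not part of the original source) of
# the two registration styles for template filters shown above.
from flask import Flask

app = Flask(__name__)

@app.template_filter()  # stored in jinja_env.filters under "reverse"
def reverse(s):
    return s[::-1]

def shout(s):
    return s.upper()

app.add_template_filter(shout, name="loud")  # explicit name wins over __name__

assert app.jinja_env.filters["loud"]("hi") == "HI"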
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
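# --- Editor's note: hypothetical sketch (not part of the original source) of
# the teardown behaviour documented above.
from flask import Flask, g

app = Flask(__name__)

@app.teardown_appcontext
def cleanup(exc):
    # ``exc`` is the unhandled exception or None; the return value is ignored.
    g.pop("resource", None)

with app.app_context():
    g.resource = "something to release"
# Leaving the ``with`` block pops the context, which runs cleanup(None).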
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
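# --- Editor's note: hypothetical sketch (not part of the original source) of
# the MRO-based handler lookup described above: one handler registered for the
# base HTTPException catches every subclass (404, 405, ...).
from flask import Flask
from werkzeug.exceptions import HTTPException

app = Flask(__name__)

@app.errorhandler(HTTPException)
def handle_any_http_error(e):
    # _find_error_handler walks exc_class.__mro__ until a registered class
    # matches, so NotFound and MethodNotAllowed both land here.
    return f"{e.code} {e.name}", e.code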
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization are done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
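# --- Editor's note: hypothetical sketch (not part of the original source) of
# the 500 handling described above; handle_exception wraps the unhandled
# error in an InternalServerError and sets ``original_exception`` on it.
from flask import Flask
from werkzeug.exceptions import InternalServerError

app = Flask(__name__)

@app.errorhandler(InternalServerError)
def handle_500(e):
    original = e.original_exception  # None for a directly-raised 500
    name = type(original).__name__ if original else "none"
    return f"Server error (caused by: {name})", 500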
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
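# --- Editor's note: hypothetical sketch (not part of the original source) of
# the async support described above; requires the ``flask[async]`` extra so
# that async_to_sync() can import asgiref.
import asyncio

from flask import Flask

app = Flask(__name__)

@app.route("/slow")
async def slow():
    # dispatch_request() calls self.ensure_sync(view), so this coroutine is
    # detected via iscoroutinefunction() and run inside the WSGI worker.
    await asyncio.sleep(0.1)
    return "done"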
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
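# --- Editor's note: hypothetical sketch (not part of the original source) of
# the three tuple shapes that make_response() unpacks, as enumerated above.
from flask import Flask

app = Flask(__name__)

@app.route("/a")
def a():
    return "created", 201  # (body, status)

@app.route("/b")
def b():
    return "hi", {"X-Demo": "1"}  # (body, headers): second item is a dict

@app.route("/c")
def c():
    # (body, status, headers); the dict body is passed through jsonify().
    return {"ok": True}, 200, {"X-Demo": "1"}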
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
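# --- Editor's note: hypothetical sketch (not part of the original source) of
# test_request_context() with the ``json`` argument described above.
from flask import Flask, request

app = Flask(__name__)

with app.test_request_context("/submit", method="POST", json={"name": "x"}):
    # ``json=`` serialized the body and defaulted the content type.
    assert request.get_json()["name"] == "x"
    assert request.content_type == "application/json"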
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 593, "name": "create_logger", "kind": "ref", "category": "function", "info": " return create_logger(self)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 596, "name": "jinja_env", "kind": "def", "category": "function", "info": " def jinja_env(self) -> Environment:\n \"\"\"The Jinja environment used to load templates.\n\n The environment is created the first time this property is\n accessed. Changing :attr:`jinja_options` after that will have no\n effect.\n \"\"\"\n return self.create_jinja_environment()\n\n @property\n def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. 
versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. 
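# --- Editor's note: hypothetical sketch (not part of the original source) of
# the instance-folder and template auto-reload behaviour documented above.
from flask import Flask

app = Flask(__name__, instance_relative_config=True)

# TEMPLATES_AUTO_RELOAD defaults to None, so templates_auto_reload falls back
# to app.debug, per the property above; setting it overrides that fallback.
app.config["TEMPLATES_AUTO_RELOAD"] = True
assert app.templates_auto_reload

# open_instance_resource("settings.cfg", mode="r") would read a file from
# app.instance_path (a hypothetical file, so the call is left commented out).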
versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader while keeping the rest unchanged. It's\n discouraged to override this function. Instead, override the\n :meth:`jinja_loader` function.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns ``True``.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it easier to\n # add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. 
When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 603, "name": "create_jinja_environment", "kind": "ref", "category": "function", "info": " return self.create_jinja_environment()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 606, "name": "got_first_request", "kind": "def", "category": "function", "info": " def got_first_request(self) -> bool:\n \"\"\"This attribute is set to ``True`` if the application started\n handling the first request.\n\n .. versionadded:: 0.8\n \"\"\"\n return self._got_first_request\n\n def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. 
versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. 
versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. 
It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
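A short sketch of the automatic ``OPTIONS`` handling that ``dispatch_request`` checks for above; ``provide_automatic_options`` can be disabled per route (the routes are illustrative)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/ping")                     # OPTIONS answered automatically
    def ping():
        return "pong"

    @app.route("/raw", provide_automatic_options=False)
    def raw():                              # OPTIONS now 405s unless listed in methods
        return "handled manually"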
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
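Given the ``ensure_sync``/``async_to_sync`` machinery above, an ``async def`` view can be sketched as follows (assumes Flask is installed with the ``async`` extra, i.e. ``asgiref``, as the code requires)::

    import asyncio

    from flask import Flask

    app = Flask(__name__)

    @app.route("/slow")
    async def slow():
        # dispatch wraps this coroutine via ensure_sync()/async_to_sync()
        await asyncio.sleep(0.1)
        return "done"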
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
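To make the return shapes accepted by ``make_response`` above concrete, a small sketch of equivalent view returns (the endpoints are made up)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/dict")
    def as_dict():
        return {"ok": True}                   # dict -> passed to jsonify()

    @app.route("/tuple")
    def as_tuple():
        # (body, status, headers); (body, status) and (body, headers) also work
        return "created", 201, {"X-Example": "1"}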
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
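A minimal sketch of the short-circuit and post-processing hooks just described (the token check is purely illustrative)::

    from flask import Flask, request

    app = Flask(__name__)

    @app.before_request
    def require_token():
        # a non-None return stops dispatch and is treated as the response
        if request.headers.get("X-Token") != "secret":
            return "forbidden", 403

    @app.after_request
    def stamp(response):
        # after_request functions run in reverse registration order
        response.headers["X-Stamped"] = "yes"
        return response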
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
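For the context teardown behavior above, a common pattern is releasing a per-context resource; the database object here is hypothetical::

    from flask import Flask, g

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_db(exc):
        db = g.pop("db", None)    # hypothetical connection stored on g
        if db is not None:
            db.close()

    with app.app_context():
        pass                      # current_app and g usable; teardown runs on exit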
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 614, "name": "make_config", "kind": "def", "category": "function", "info": " def make_config(self, instance_relative: bool = False) -> Config:\n \"\"\"Used to create the config attribute by the Flask constructor.\n The `instance_relative` parameter is passed in from the constructor\n of Flask (there named `instance_relative_config`) and indicates if\n the config should be relative to the instance path or the root path\n of the application.\n\n .. versionadded:: 0.8\n \"\"\"\n root_path = self.root_path\n if instance_relative:\n root_path = self.instance_path\n defaults = dict(self.default_config)\n defaults[\"ENV\"] = get_env()\n defaults[\"DEBUG\"] = get_debug_flag()\n return self.config_class(root_path, defaults)\n\n def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. 
versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. 
versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. 
It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
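Per the resolution logic below, host and port fall back from the arguments to ``SERVER_NAME`` to ``127.0.0.1:5000``; a sketch of the debugger-without-reloader invocation the docstring recommends::

    from flask import Flask

    app = Flask(__name__)

    if __name__ == "__main__":
        # interactive debugger active, code reloading disabled
        app.run(debug=True, use_reloader=False)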
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
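To ground ``register_blueprint`` and ``add_url_rule`` above, a short sketch (the blueprint and endpoint names are invented)::

    from flask import Blueprint, Flask

    bp = Blueprint("api", __name__)

    @bp.route("/ping")
    def ping():
        return "pong"

    def index():
        return "hello"

    app = Flask(__name__)
    app.register_blueprint(bp, url_prefix="/api")  # kwargs override blueprint defaults

    # equivalent to decorating index() with @app.route("/")
    app.add_url_rule("/", "index", index)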
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
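The tuple forms accepted by ``make_response`` above can be exercised directly from view functions; a hedged usage sketch (routes and header names are made up):

from flask import Flask

app = Flask(__name__)

@app.route("/three")
def three():
    # (body, status, headers): status and headers applied to the response.
    return "created", 201, {"X-Demo": "yes"}

@app.route("/status")
def status():
    # (body, status): second element is not a dict/list/tuple/Headers.
    return "teapot", 418

@app.route("/headers")
def headers():
    # (body, headers): second element is a dict, so it is taken as headers.
    return "ok", {"X-Demo": "yes"}

client = app.test_client()
assert client.get("/three").status_code == 201
assert client.get("/status").status_code == 418
assert client.get("/headers").headers["X-Demo"] == "yes"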
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
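A small usage sketch of the hook ordering that ``preprocess_request``, ``process_response``, and ``do_teardown_request`` implement (the function names here are illustrative):

from flask import Flask

app = Flask(__name__)
calls = []

@app.before_request
def record_before():
    calls.append("before")
    # Returning a non-None value here would be treated as the
    # response, and the view would never run.

@app.after_request
def record_after(response):
    calls.append("after")
    return response

@app.teardown_request
def record_teardown(exc):
    # exc is the unhandled exception, or None on success.
    calls.append("teardown")

@app.route("/")
def index():
    calls.append("view")
    return "ok"

app.test_client().get("/")
assert calls == ["before", "view", "after", "teardown"]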
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
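For the ``app_context`` usage described above, a short sketch showing ``current_app`` resolving outside any request (the config key is invented for illustration):

from flask import Flask, current_app

app = Flask(__name__)
app.config["ANSWER"] = 42  # hypothetical config key

with app.app_context():
    # Inside the block, current_app points at `app`.
    assert current_app.config["ANSWER"] == 42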
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 627, "name": "get_env", "kind": "ref", "category": "function", "info": " defaults[\"ENV\"] = get_env()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 628, "name": "get_debug_flag", "kind": "ref", "category": "function", "info": " defaults[\"DEBUG\"] = get_debug_flag()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 629, "name": "config_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 631, "name": "auto_find_instance_path", "kind": "def", "category": "function", "info": " def auto_find_instance_path(self) -> str:\n \"\"\"Tries to locate the instance path if it was not provided to the\n constructor of the application class. It will basically calculate\n the path to a folder named ``instance`` next to your main file or\n the package.\n\n .. 
versionadded:: 0.8\n \"\"\"\n prefix, package_path = find_package(self.import_name)\n if prefix is None:\n return os.path.join(package_path, \"instance\")\n return os.path.join(prefix, \"var\", f\"{self.name}-instance\")\n\n def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keep the rest unchanged. It's\n discouraged to override this function. Instead, override the\n :meth:`jinja_loader` function.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. 
versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it easier to\n # add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. 
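As a usage sketch for ``make_shell_context`` above (the extra variable is invented for illustration):

from flask import Flask

app = Flask(__name__)

@app.shell_context_processor
def extra_vars():
    # Hypothetical objects to expose in ``flask shell``.
    return {"answer": 42}

ctx = app.make_shell_context()
# "app" and "g" are always present; processors merge their dicts in.
assert sorted(ctx) == ["answer", "app", "g"]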
It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
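The host/port precedence documented above (explicit argument, then ``SERVER_NAME``, then ``127.0.0.1:5000``) can be captured in a standalone sketch; this mirrors the documented fallback logic rather than calling ``run()``, and the function name is made up:

import typing as t

def resolve_host_port(
    host: t.Optional[str],
    port: t.Optional[int],
    server_name: t.Optional[str],
) -> t.Tuple[str, int]:
    # Explicit arguments win, then the SERVER_NAME config, then defaults.
    sn_host = sn_port = ""
    if server_name:
        sn_host, _, sn_port = server_name.partition(":")
    resolved_host = host or sn_host or "127.0.0.1"
    if port or port == 0:
        resolved_port = int(port)
    else:
        resolved_port = int(sn_port) if sn_port else 5000
    return resolved_host, resolved_port

assert resolve_host_port(None, None, "example.test:8080") == ("example.test", 8080)
assert resolve_host_port("0.0.0.0", None, None) == ("0.0.0.0", 5000)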
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
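A brief usage sketch for ``register_blueprint`` above (blueprint and route names are made up):

from flask import Blueprint, Flask

bp = Blueprint("demo", __name__)

@bp.route("/ping")
def ping():
    return "pong"

app = Flask(__name__)
# Keyword arguments such as url_prefix override the blueprint's own defaults.
app.register_blueprint(bp, url_prefix="/api")

assert app.test_client().get("/api/ping").data == b"pong"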
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
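The code-and-MRO lookup described for ``handle_http_exception`` above means a handler registered for the base ``HTTPException`` catches any HTTP error with no more specific handler; a hedged sketch (messages are invented):

from flask import Flask
from werkzeug.exceptions import HTTPException

app = Flask(__name__)

@app.errorhandler(HTTPException)
def handle_http(e):
    # Reached through NotFound's MRO, since nothing more specific
    # is registered for 404.
    return f"http error {e.code}", e.code

resp = app.test_client().get("/nope")
assert resp.status_code == 404
assert resp.data == b"http error 404"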
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
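Since ``wsgi_app`` (shown earlier) remains a plain WSGI callable, middleware wraps it exactly as its docstring recommends; a minimal sketch with an invented header-stamping middleware:

from flask import Flask

class HeaderMiddleware:
    # Hypothetical WSGI middleware that injects one response header.
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        def patched_start_response(status, headers, exc_info=None):
            headers.append(("X-Middleware", "on"))
            return start_response(status, headers, exc_info)

        return self.wsgi_app(environ, patched_start_response)

app = Flask(__name__)

@app.route("/")
def index():
    return "ok"

# Wrap wsgi_app rather than the app object, keeping `app` usable.
app.wsgi_app = HeaderMiddleware(app.wsgi_app)

assert app.test_client().get("/").headers["X-Middleware"] == "on"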
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 639, "name": "find_package", "kind": "ref", "category": "function", "info": " prefix, package_path = find_package(self.import_name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 644, "name": "open_instance_resource", "kind": "def", "category": "function", "info": " def open_instance_resource(self, resource: str, mode: str = \"rb\") -> t.IO[t.AnyStr]:\n \"\"\"Opens a resource from the application's instance folder\n (:attr:`instance_path`). Otherwise works like\n :meth:`open_resource`. Instance resources can also be opened for\n writing.\n\n :param resource: the name of the resource. To access resources within\n subfolders use forward slashes as separator.\n :param mode: resource file opening mode, default is 'rb'.\n \"\"\"\n return open(os.path.join(self.instance_path, resource), mode)\n\n @property\n def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. 
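A sketch of the recommended middleware pattern from the ``wsgi_app()`` docstring: wrap ``app.wsgi_app`` rather than rebinding ``app``, so the Flask object stays reachable. ``HeaderMiddleware`` is a hypothetical example, not a Flask class.

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    @app.route("/")
    def index():
        return "ok"

    class HeaderMiddleware:
        """Hypothetical WSGI middleware adding one response header."""

        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            def _start_response(status, headers, exc_info=None):
                headers.append(("X-Wrapped", "1"))
                return start_response(status, headers, exc_info)

            return self.wsgi_app(environ, _start_response)

    # The app object keeps its routes, config, and CLI intact.
    app.wsgi_app = HeaderMiddleware(app.wsgi_app)

    with app.test_client() as client:
        assert client.get("/").headers["X-Wrapped"] == "1"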
versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader, keeping the rest unchanged. Overriding\n this function is discouraged; override the\n :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns ``True``.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it easier to\n # add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. 
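A sketch of the "original values win" rule that ``update_template_context()`` implements above: a context processor cannot clobber a variable passed explicitly to the template.

.. code-block:: python

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.context_processor
    def inject_defaults():
        return {"title": "default", "footer": "from processor"}

    with app.test_request_context("/"):
        # The explicit `title` wins over the processor's value, as
        # update_template_context() guarantees.
        out = render_template_string("{{ title }}|{{ footer }}", title="explicit")
        assert out == "explicit|from processor"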
This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. 
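A sketch of the config mapping described above, assuming ``FLASK_ENV`` and ``FLASK_DEBUG`` are unset in the environment: ``app.debug`` is a thin view over ``config["DEBUG"]``, just as ``env`` maps to ``config["ENV"]``.

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)
    assert app.env == "production"       # default when FLASK_ENV is unset
    assert app.debug is False

    app.config["DEBUG"] = True
    assert app.debug is True             # the property reads the config key

    app.debug = False
    assert app.config["DEBUG"] is False  # the setter writes it back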
Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. 
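A standalone re-derivation (not Flask API) of the host/port fallback order implemented in ``run()`` above: explicit argument first, then ``SERVER_NAME``, then the ``127.0.0.1:5000`` defaults.

.. code-block:: python

    def resolve_host_port(host, port, server_name):
        # Mirrors the precedence in run(): argument, SERVER_NAME, default.
        sn_host = sn_port = None
        if server_name:
            sn_host, _, sn_port = server_name.partition(":")
        host = host or sn_host or "127.0.0.1"
        if port or port == 0:
            port = int(port)
        else:
            port = int(sn_port) if sn_port else 5000
        return host, port

    assert resolve_host_port(None, None, "example.com:8080") == ("example.com", 8080)
    assert resolve_host_port("0.0.0.0", None, None) == ("0.0.0.0", 5000)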
This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
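A small sketch of ``register_blueprint()`` with the ``url_prefix`` override documented above; the ``api`` blueprint and ``/api`` prefix are illustrative.

.. code-block:: python

    from flask import Blueprint, Flask

    bp = Blueprint("api", __name__)

    @bp.route("/ping")
    def ping():
        return "pong"

    app = Flask(__name__)
    app.register_blueprint(bp, url_prefix="/api")  # overrides the blueprint default

    with app.test_client() as client:
        assert client.get("/api/ping").data == b"pong"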
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
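A sketch of ``add_url_rule()`` as the functional twin of the ``route()`` decorator. Per the code above, ``methods`` defaults to ``("GET",)`` and ``OPTIONS`` is added and answered automatically when not listed.

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    def hello():
        return "hello"

    app.add_url_rule("/hello", endpoint="hello", view_func=hello)

    with app.test_client() as client:
        assert client.get("/hello").data == b"hello"
        # provide_automatic_options defaulted to True, so OPTIONS is
        # handled by make_default_options_response().
        assert client.options("/hello").status_code == 200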
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
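A sketch of the ``teardown_appcontext()`` pattern described above; ``get_db``/``close_db`` are hypothetical helpers and the stored object stands in for a real connection.

.. code-block:: python

    from flask import Flask, g

    app = Flask(__name__)

    def get_db():
        if "db" not in g:
            g.db = object()  # stand-in for a real connection
        return g.db

    @app.teardown_appcontext
    def close_db(exc):
        # exc is the unhandled exception, if any; the return value
        # of a teardown function is ignored.
        g.pop("db", None)

    with app.app_context():
        get_db()
    # close_db ran when the context was popped.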
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as a response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
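A sketch of the MRO lookup that ``_find_error_handler()``/``handle_http_exception()`` perform above: a handler registered for the base ``HTTPException`` catches any HTTP error that has no more specific handler.

.. code-block:: python

    from flask import Flask
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    @app.errorhandler(HTTPException)
    def on_http_error(e):
        return f"caught {e.code}", e.code

    with app.test_client() as client:
        rv = client.get("/no-such-page")  # 404 from routing
        assert rv.status_code == 404
        assert rv.data == b"caught 404"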
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization are done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
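A sketch of the 500-handler contract spelled out in ``handle_exception()`` above: with propagation off (no debug/testing), the handler always receives an ``InternalServerError`` whose ``original_exception`` carries the unhandled error.

.. code-block:: python

    from flask import Flask
    from werkzeug.exceptions import InternalServerError

    app = Flask(__name__)

    @app.route("/boom")
    def boom():
        raise RuntimeError("kaput")

    @app.errorhandler(InternalServerError)
    def on_500(e):
        original = getattr(e, "original_exception", None)
        return f"500 caused by {type(original).__name__}", 500

    with app.test_client() as client:
        assert client.get("/boom").data == b"500 caused by RuntimeError"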
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
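A sketch of the once-per-process guarantee that ``try_trigger_before_first_request_functions()`` enforces above (double-checked under a lock).

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)
    calls = []

    @app.before_first_request
    def warm_up():
        calls.append("warmed")

    @app.route("/")
    def index():
        return "ok"

    with app.test_client() as client:
        client.get("/")
        client.get("/")

    assert calls == ["warmed"]  # ran exactly once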
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
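A sketch of what ``ensure_sync()``/``async_to_sync()`` enable: an ``async def`` view is wrapped to run inside the synchronous WSGI worker. This assumes Flask was installed with the ``async`` extra (asgiref), as the code above checks.

.. code-block:: python

    import asyncio

    from flask import Flask

    app = Flask(__name__)

    @app.route("/async")
    async def async_view():
        # ensure_sync() detects the coroutine function and wraps it
        # with asgiref's async_to_sync before dispatch.
        await asyncio.sleep(0)
        return "done"

    with app.test_client() as client:
        assert client.get("/async").data == b"done"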
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
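A sketch exercising the return shapes enumerated above via ``app.make_response()``; the header names are illustrative.

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    with app.test_request_context("/"):
        rv = app.make_response(("created", 201, {"X-Via": "triple"}))
        assert rv.status_code == 201 and rv.headers["X-Via"] == "triple"

        rv = app.make_response(("body", {"X-Via": "pair"}))  # (body, headers)
        assert rv.status_code == 200 and rv.headers["X-Via"] == "pair"

        rv = app.make_response({"ok": True})  # dicts are passed to jsonify()
        assert rv.is_json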
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
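A sketch of the pairing that ``inject_url_defaults()`` and the URL value preprocessors support: a ``lang`` URL value (illustrative) is pulled into ``g`` on the way in and re-injected whenever ``url_for()`` builds a URL.

.. code-block:: python

    from flask import Flask, g, url_for

    app = Flask(__name__)

    @app.url_value_preprocessor
    def pull_lang(endpoint, values):
        # Strip the language code before the view sees its arguments.
        g.lang = (values or {}).pop("lang", None)

    @app.url_defaults
    def push_lang(endpoint, values):
        # Re-inject it when building URLs; inject_url_defaults() calls this.
        if "lang" not in values and getattr(g, "lang", None):
            values["lang"] = g.lang

    @app.route("/<lang>/about")
    def about():
        return url_for("about")

    with app.test_client() as client:
        assert client.get("/en/about").data == b"/en/about"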
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 657, "name": "templates_auto_reload", "kind": "def", "category": "function", "info": " def templates_auto_reload(self) -> bool:\n \"\"\"Reload templates when they are changed. Used by\n :meth:`create_jinja_environment`.\n\n This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If\n not set, it will be enabled in debug mode.\n\n .. versionadded:: 1.0\n This property was added but the underlying config and behavior\n already existed.\n \"\"\"\n rv = self.config[\"TEMPLATES_AUTO_RELOAD\"]\n return rv if rv is not None else self.debug\n\n @templates_auto_reload.setter\n def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. 
versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader, keeping the rest unchanged. Overriding\n this function is discouraged; override the\n :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns ``True``.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it easier to\n # add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. 
When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
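Since add_url_rule() is the programmatic registration point, a short sketch (hypothetical view function) of using it directly, which is what the @app.route() decorator does under the hood:

def index():
    return "Hello, World!"

# Equivalent to decorating index with @app.route("/"); with no
# methods given, GET is assumed and OPTIONS is added automatically.
app.add_url_rule("/", "index", index)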
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
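A small sketch (hypothetical handler name) of the MRO-based lookup described in handle_http_exception(): one handler registered for the base HTTPException catches any subclass that lacks a more specific handler.

from werkzeug.exceptions import HTTPException

@app.errorhandler(HTTPException)
def handle_http_error(e):
    # A dict return is serialized via jsonify(); the exception's own
    # code is reused as the response status.
    return {"code": e.code, "name": e.name}, e.code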
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
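As a sketch of what ensure_sync() enables (assuming Flask was installed with the ``async`` extra, i.e. asgiref is available), a coroutine view can be registered like any other:

@app.route("/status")
async def status():
    # ensure_sync()/async_to_sync() wrap this coroutine so a WSGI
    # worker can call it synchronously.
    return {"ok": True}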
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
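A compact sketch (hypothetical route) of the return forms make_response() accepts, matching the type list in its docstring above:

@app.route("/demo")
def demo():
    # Also valid, per the docstring:
    #   return "text"                  -> str body
    #   return {"value": 1}            -> dict, passed through jsonify()
    #   return "created", 201          -> (body, status)
    #   return "ok", {"X-Demo": "1"}   -> (body, headers)
    return "ok", 200, {"X-Demo": "1"}  # (body, status, headers)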
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
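The canonical use of the teardown machinery described above is releasing a per-context resource; a minimal sketch (the "db" attribute on g is hypothetical):

from flask import g

@app.teardown_appcontext
def close_db(exc):
    # exc is the unhandled exception, or None; the return value is ignored.
    db = g.pop("db", None)
    if db is not None:
        db.close()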
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
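A minimal sketch of test_request_context() with the Flask-specific ``json`` argument, assuming an existing ``app`` (path and payload are hypothetical):

from flask import request

with app.test_request_context("/hello", method="POST", json={"name": "x"}):
    # Inside the block, the request proxy points at the built environ.
    assert request.method == "POST"
    assert request.get_json()["name"] == "x"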
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 672, "name": "templates_auto_reload", "kind": "def", "category": "function", "info": " def templates_auto_reload(self, value: bool) -> None:\n self.config[\"TEMPLATES_AUTO_RELOAD\"] = value\n\n def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. 
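A sketch of the middleware pattern recommended in the wsgi_app() docstring (LoggingMiddleware is hypothetical); wrapping wsgi_app rather than the app object keeps the Flask instance and its methods accessible:

class LoggingMiddleware:
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        # Log the path, then delegate to the wrapped WSGI callable.
        print("request path:", environ.get("PATH_INFO"))
        return self.wsgi_app(environ, start_response)

app.wsgi_app = LoggingMiddleware(app.wsgi_app)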
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 675, "name": "create_jinja_environment", "kind": "def", "category": "function", "info": " def create_jinja_environment(self) -> Environment:\n \"\"\"Create the Jinja environment based on :attr:`jinja_options`\n and the various Jinja-related methods of the app. Changing\n :attr:`jinja_options` after this will have no effect. Also adds\n Flask-related globals and filters to the environment.\n\n .. versionchanged:: 0.11\n ``Environment.auto_reload`` set in accordance with\n ``TEMPLATES_AUTO_RELOAD`` configuration option.\n\n .. 
versionadded:: 0.5\n \"\"\"\n options = dict(self.jinja_options)\n\n if \"autoescape\" not in options:\n options[\"autoescape\"] = self.select_jinja_autoescape\n\n if \"auto_reload\" not in options:\n options[\"auto_reload\"] = self.templates_auto_reload\n\n rv = self.jinja_environment(self, **options)\n rv.globals.update(\n url_for=url_for,\n get_flashed_messages=get_flashed_messages,\n config=self.config,\n # request, session and g are normally added with the\n # context processor for efficiency reasons but for imported\n # templates we also want the proxies in there.\n request=request,\n session=session,\n g=g,\n )\n rv.policies[\"json.dumps_function\"] = json.dumps\n return rv\n\n def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. 
When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
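The ``add_url_rule`` logic embedded above normalizes ``methods`` to an upper-cased set, defaults to ``("GET",)``, and enables automatic ``OPTIONS`` handling whenever the view does not claim ``OPTIONS`` itself. A minimal sketch of that behavior, assuming Flask is installed; the ``hello`` view, endpoint name, and URL are invented for illustration::

    from flask import Flask

    app = Flask(__name__)

    def hello():
        return "Hello!"

    # Equivalent to @app.route("/hello", methods=["GET", "POST"]).
    # Method names are upper-cased, and because "OPTIONS" is not in the
    # list, provide_automatic_options stays enabled and "OPTIONS" is
    # added to the rule's required methods.
    app.add_url_rule(
        "/hello", endpoint="hello", view_func=hello, methods=["get", "post"]
    )

    with app.test_client() as client:
        rv = client.options("/hello")
        # make_default_options_response() fills the Allow header from the
        # URL adapter's allowed methods (HEAD is added by Werkzeug).
        print(sorted(rv.allow))  # e.g. ['GET', 'HEAD', 'OPTIONS', 'POST']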
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
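The return-type conventions that ``make_response`` documents above, shown end to end. A sketch using only the documented forms (``dict``, ``(body, status)``, ``(body, headers)``); endpoint names are illustrative::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/json")
    def as_json():
        return {"ok": True}  # dict -> passed through jsonify()

    @app.route("/created")
    def created():
        return "made", 201  # (body, status)

    @app.route("/tagged")
    def tagged():
        return "hi", {"X-Tag": "demo"}  # (body, headers)

    with app.test_client() as c:
        print(c.get("/json").get_json())          # {'ok': True}
        print(c.get("/created").status_code)      # 201
        print(c.get("/tagged").headers["X-Tag"])  # demo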
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
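How the hooks above interact: a non-``None`` ``before_request`` return value short-circuits dispatch, and ``after_request`` functions post-process the response. A sketch; handler names are made up::

    from flask import Flask, request

    app = Flask(__name__)

    @app.before_request
    def gatekeeper():
        # A non-None return value is used as the response and the
        # view is never called, as preprocess_request() documents.
        if request.args.get("blocked"):
            return "blocked", 403

    @app.after_request
    def stamp(response):
        response.headers["X-Processed"] = "yes"
        return response

    @app.route("/")
    def index():
        return "hello"

    with app.test_client() as c:
        print(c.get("/?blocked=1").status_code)   # 403
        print(c.get("/").headers["X-Processed"])  # yes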
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
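Manual context usage as described above. A sketch, assuming default configuration; the endpoint is hypothetical::

    from flask import Flask, request, url_for

    app = Flask(__name__)

    @app.route("/hello/<name>")
    def hello(name):
        return f"hello {name}"

    # test_request_context() builds a WSGI environ and pushes a
    # request context without dispatching a full request.
    with app.test_request_context("/hello/world"):
        print(request.path)                  # /hello/world
        print(url_for("hello", name="ada"))  # /hello/ada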
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 695, "name": "jinja_environment", "kind": "ref", "category": "function", "info": " rv = self.jinja_environment(self, **options)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 710, "name": "create_global_jinja_loader", "kind": "def", "category": "function", "info": " def create_global_jinja_loader(self) -> DispatchingJinjaLoader:\n \"\"\"Creates the loader for the Jinja2 environment. Can be used to\n override just the loader and keeping the rest unchanged. It's\n discouraged to override this function. Instead one should override\n the :meth:`jinja_loader` function instead.\n\n The global loader dispatches between the loaders of the application\n and the individual blueprints.\n\n .. versionadded:: 0.7\n \"\"\"\n return DispatchingJinjaLoader(self)\n\n def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. 
versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # add new variables in context processors more easily without\n # breaking existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. 
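The precedence rule in ``update_template_context`` above (original values win) in action. A sketch; the processor and variable names are made up::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.context_processor
    def inject_site_name():
        return {"site_name": "Example"}

    with app.test_request_context("/"):
        # The processor supplies site_name...
        print(render_template_string("{{ site_name }}"))  # Example
        # ...but an explicit value with the same key wins.
        print(render_template_string("{{ site_name }}", site_name="Mine"))  # Mine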
It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
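The ``debug=True, use_reloader=False`` combination suggested in the admonition above, as a short sketch (development use only)::

    from flask import Flask

    app = Flask(__name__)

    if __name__ == "__main__":
        # Interactive debugger without the code reloader.
        app.run(debug=True, use_reloader=False)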
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
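``register_blueprint`` with a keyword override, per the parameter list above. A sketch; blueprint and endpoint names are illustrative::

    from flask import Blueprint, Flask

    bp = Blueprint("admin", __name__)

    @bp.route("/dashboard")
    def dashboard():
        return "admin dashboard"

    app = Flask(__name__)
    # url_prefix passed here overrides any default set on the blueprint.
    app.register_blueprint(bp, url_prefix="/admin")

    with app.test_client() as c:
        print(c.get("/admin/dashboard").data)  # b'admin dashboard'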
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as a response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in\n the traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
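Error handling as routed through ``handle_user_exception``/``handle_exception`` above. A sketch: a registered 404 handler is used, while an unhandled error becomes a 500 (with ``propagate_exceptions`` off, the default outside testing and debug mode); routes are illustrative::

    from flask import Flask

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        return {"error": "not found"}, 404

    @app.route("/boom")
    def boom():
        raise RuntimeError("no handler registered for this")

    with app.test_client() as c:
        print(c.get("/missing").get_json())  # {'error': 'not found'}
        print(c.get("/boom").status_code)    # 500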
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
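Building URLs without a request, which relies on ``create_url_adapter`` above binding the map from ``SERVER_NAME`` when no request is available. A sketch; the domain and endpoint are placeholders::

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"

    @app.route("/profile/<name>")
    def profile(name):
        return name

    # Inside an app context (no request), the adapter is bound from config.
    with app.app_context():
        print(url_for("profile", name="ada", _external=True))
        # http://example.com/profile/ada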
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
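``do_teardown_request`` above calls each ``teardown_request`` function with the unhandled exception, or ``None`` on a clean request. A sketch::

    from flask import Flask

    app = Flask(__name__)

    @app.teardown_request
    def cleanup(exc):
        # exc is None on a clean request, the exception otherwise.
        print("teardown:", exc)

    @app.route("/")
    def index():
        return "ok"

    app.test_client().get("/")  # prints "teardown: None"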
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
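The matching app-context hook, ``do_teardown_appcontext``, fires when the application context pops. A sketch::

    from flask import Flask

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_resources(exc):
        print("app context teardown:", exc)

    with app.app_context():
        pass  # prints "app context teardown: None" on exit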
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 721, "name": "DispatchingJinjaLoader", "kind": "ref", "category": "function", "info": " return DispatchingJinjaLoader(self)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 723, "name": "select_jinja_autoescape", "kind": "def", "category": "function", "info": " def select_jinja_autoescape(self, filename: str) -> bool:\n \"\"\"Returns ``True`` if autoescaping should be active for the given\n template name. If no template name is given, returns `True`.\n\n .. versionadded:: 0.5\n \"\"\"\n if filename is None:\n return True\n return filename.endswith((\".html\", \".htm\", \".xml\", \".xhtml\"))\n\n def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. 
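To make the ``wsgi_app`` wrapping pattern above concrete, here is a hypothetical logging middleware (the class and its behavior are illustrative, not a Flask API)::

    from flask import Flask

    app = Flask(__name__)

    class LoggingMiddleware:
        # Hypothetical WSGI middleware that prints each request path.
        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            print("request:", environ.get("PATH_INFO"))
            return self.wsgi_app(environ, start_response)

    # Wrap wsgi_app rather than rebinding ``app`` itself, so the
    # original Flask object keeps its methods (route, run, ...).
    app.wsgi_app = LoggingMiddleware(app.wsgi_app)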
Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. 
admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. 
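A minimal development-server invocation matching the host/port resolution described above; the values shown are examples only, and ``run()`` remains development-only::

    from flask import Flask

    app = Flask(__name__)

    if __name__ == "__main__":
        # Host falls back to SERVER_NAME, then 127.0.0.1; port falls
        # back to SERVER_NAME's port, then 5000. Extra keyword options
        # are forwarded to werkzeug.serving.run_simple().
        app.run(host="0.0.0.0", port=8080, debug=True)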
For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. 
versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. 
Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
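A short sketch of registering error handlers consistent with the lookup order described above; the handler names are hypothetical::

    from flask import Flask

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        return "nothing here", 404

    @app.errorhandler(500)
    def internal_error(e):
        # The handler always receives the InternalServerError; the
        # unhandled exception, if any, is e.original_exception.
        original = getattr(e, "original_exception", None)
        app.logger.error("unhandled error: %r", original)
        return "something went wrong", 500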
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
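The automatic ``OPTIONS`` branch in ``dispatch_request`` can be observed with the test client; this sketch assumes default settings::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/ping")
    def ping():
        return "pong"

    with app.test_client() as client:
        # provide_automatic_options is enabled by default, so OPTIONS
        # is answered by make_default_options_response().
        rv = client.options("/ping")
        assert "GET" in rv.headers["Allow"]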
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
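Since ``ensure_sync`` wraps coroutine functions via ``async_to_sync``, an ``async def`` view can be registered directly; this assumes Flask was installed with the ``async`` extra (asgiref)::

    import asyncio

    from flask import Flask

    app = Flask(__name__)

    @app.route("/slow")
    async def slow():
        # ensure_sync() wraps this coroutine so a WSGI worker can
        # call it synchronously.
        await asyncio.sleep(0.1)
        return "done"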
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
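To illustrate the return types accepted by ``make_response`` above (the routes and bodies are arbitrary examples)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/text")
    def text():
        # (body, status, headers) three-tuple
        return "created", 201, {"X-Extra": "1"}

    @app.route("/json")
    def as_json():
        # a dict is passed through jsonify()
        return {"ok": True}

    @app.route("/pair")
    def pair():
        # (body, headers) two-tuple: the second item is a dict,
        # so it is treated as headers rather than a status
        return "hello", {"X-Extra": "2"}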
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
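A common use of the teardown machinery described above is releasing a per-context resource; ``g.db`` here is a hypothetical handle, not a Flask API::

    from flask import Flask, g

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_db(exc):
        # Runs when the app context is popped; ``exc`` is the
        # unhandled exception, if any. Return values are ignored.
        db = g.pop("db", None)
        if db is not None:
            db.close()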
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 733, "name": "update_template_context", "kind": "def", "category": "function", "info": " def update_template_context(self, context: dict) -> None:\n \"\"\"Update the template context with some commonly used variables.\n This injects request, session, config and g into the template\n context as well as everything template context processors want\n to inject. Note that the as of Flask 0.6, the original values\n in the context will not be overridden if a context processor\n decides to return a value with the same key.\n\n :param context: the context as a dictionary that is updated in place\n to add extra variables.\n \"\"\"\n funcs: t.Iterable[\n TemplateContextProcessorCallable\n ] = self.template_context_processors[None]\n reqctx = _request_ctx_stack.top\n if reqctx is not None:\n for bp in self._request_blueprints():\n if bp in self.template_context_processors:\n funcs = chain(funcs, self.template_context_processors[bp])\n orig_ctx = context.copy()\n for func in funcs:\n context.update(func())\n # make sure the original values win. 
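As a sketch of how ``update_template_context`` picks up extra values, a context processor returns a dict to merge into every template context; keys already set by the view still win, as the code above ensures. The injected name is illustrative::

    from flask import Flask

    app = Flask(__name__)

    @app.context_processor
    def inject_site_name():
        # Merged into the template context by update_template_context().
        return {"site_name": "Example Site"}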
This makes it possible to\n # easier add new variables in context processors without breaking\n # existing views.\n context.update(orig_ctx)\n\n def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. 
Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. 
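A compact ``test_client`` sketch matching the docstring above; the route and assertions are illustrative::

    from flask import Flask, request

    app = Flask(__name__)

    @app.route("/hello")
    def hello():
        return "hi " + request.args.get("name", "world")

    app.testing = True  # let exceptions propagate to the client
    with app.test_client() as client:
        rv = client.get("/hello?name=flask")
        assert rv.data == b"hi flask"
        # context locals remain accessible inside the with block
        assert request.args["name"] == "flask"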
This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n        # a tuple of only ``GET`` as default.\n        if methods is None:\n            methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n        if isinstance(methods, str):\n            raise TypeError(\n                \"Allowed methods must be a list of strings, for\"\n                ' example: @app.route(..., methods=[\"POST\"])'\n            )\n        methods = {item.upper() for item in methods}\n\n        # Methods that should always be added\n        required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n        # starting with Flask 0.8 the view_func object can disable and\n        # force-enable the automatic options handling.\n        if provide_automatic_options is None:\n            provide_automatic_options = getattr(\n                view_func, \"provide_automatic_options\", None\n            )\n\n        if provide_automatic_options is None:\n            if \"OPTIONS\" not in methods:\n                provide_automatic_options = True\n                required_methods.add(\"OPTIONS\")\n            else:\n                provide_automatic_options = False\n\n        # Add the required methods now.\n        methods |= required_methods\n\n        rule = self.url_rule_class(rule, methods=methods, **options)\n        rule.provide_automatic_options = provide_automatic_options  # type: ignore\n\n        self.url_map.add(rule)\n        if view_func is not None:\n            old_func = self.view_functions.get(endpoint)\n            if old_func is not None and old_func != view_func:\n                raise AssertionError(\n                    \"View function mapping is overwriting an existing\"\n                    f\" endpoint function: {endpoint}\"\n                )\n            self.view_functions[endpoint] = view_func\n\n    @setupmethod\n    def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n        \"\"\"A decorator that is used to register a custom template filter.\n        You can specify a name for the filter, otherwise the function\n        name will be used. Example::\n\n            @app.template_filter()\n            def reverse(s):\n                return s[::-1]\n\n        :param name: the optional name of the filter, otherwise the\n            function name will be used.\n        \"\"\"\n\n        def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n            self.add_template_filter(f, name=name)\n            return f\n\n        return decorator\n\n    @setupmethod\n    def add_template_filter(\n        self, f: TemplateFilterCallable, name: t.Optional[str] = None\n    ) -> None:\n        \"\"\"Register a custom template filter. Works exactly like the\n        :meth:`template_filter` decorator.\n\n        :param name: the optional name of the filter, otherwise the\n            function name will be used.\n        \"\"\"\n        self.jinja_env.filters[name or f.__name__] = f\n\n    @setupmethod\n    def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n        \"\"\"A decorator that is used to register a custom template test.\n        You can specify a name for the test, otherwise the function\n        name will be used. Example::\n\n            @app.template_test()\n            def is_prime(n):\n                if n == 2:\n                    return True\n                for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n                    if n % i == 0:\n                        return False\n                return True\n\n        .. versionadded:: 0.10\n\n        :param name: the optional name of the test, otherwise the\n            function name will be used.\n        \"\"\"\n\n        def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n            self.add_template_test(f, name=name)\n            return f\n\n        return decorator\n\n    @setupmethod\n    def add_template_test(\n        self, f: TemplateTestCallable, name: t.Optional[str] = None\n    ) -> None:\n        \"\"\"Register a custom template test. Works exactly like the\n        :meth:`template_test` decorator.\n\n        .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n        \"\"\"\n        self.shell_context_processors.append(f)\n        return f\n\n    def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n        \"\"\"Return a registered error handler for an exception in this order:\n        blueprint handler for a specific code, app handler for a specific code,\n        blueprint handler for an exception class, app handler for an exception\n        class, or ``None`` if a suitable handler is not found.\n        \"\"\"\n        exc_class, code = self._get_exc_class_and_code(type(e))\n\n        for c in [code, None]:\n            for name in chain(self._request_blueprints(), [None]):\n                handler_map = self.error_handler_spec[name][c]\n\n                if not handler_map:\n                    continue\n\n                for cls in exc_class.__mro__:\n                    handler = handler_map.get(cls)\n\n                    if handler is not None:\n                        return handler\n        return None\n\n    def handle_http_exception(\n        self, e: HTTPException\n    ) -> t.Union[HTTPException, ResponseReturnValue]:\n        \"\"\"Handles an HTTP exception. By default this will invoke the\n        registered error handlers and fall back to returning the\n        exception as a response.\n\n        .. versionchanged:: 1.0.3\n            ``RoutingException``, used internally for actions such as\n            slash redirects during routing, is not passed to error\n            handlers.\n\n        .. versionchanged:: 1.0\n            Exceptions are looked up by code *and* by MRO, so\n            ``HTTPException`` subclasses can be handled with a catch-all\n            handler for the base ``HTTPException``.\n\n        .. versionadded:: 0.3\n        \"\"\"\n        # Proxy exceptions don't have error codes. We want to always return\n        # those unchanged as errors\n        if e.code is None:\n            return e\n\n        # RoutingExceptions are used internally to trigger routing\n        # actions, such as slash redirects raising RequestRedirect. They\n        # are not raised or handled in user code.\n        if isinstance(e, RoutingException):\n            return e\n\n        handler = self._find_error_handler(e)\n        if handler is None:\n            return e\n        return self.ensure_sync(handler)(e)\n\n    def trap_http_exception(self, e: Exception) -> bool:\n        \"\"\"Checks if an HTTP exception should be trapped or not. By default\n        this will return ``False`` for all exceptions except for a bad request\n        key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n        also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n        This is called for all HTTP exceptions raised by a view function.\n        If it returns ``True`` for any exception the error handler for this\n        exception is not called and it shows up as a regular exception in the\n        traceback. This is helpful for debugging implicitly raised HTTP\n        exceptions.\n\n        .. versionchanged:: 1.0\n            Bad request errors are not trapped by default in debug mode.\n\n        .. versionadded:: 0.8\n        \"\"\"\n        if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n            return True\n\n        trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n        # if unset, trap key errors in debug mode\n        if (\n            trap_bad_request is None\n            and self.debug\n            and isinstance(e, BadRequestKeyError)\n        ):\n            return True\n\n        if trap_bad_request:\n            return isinstance(e, BadRequest)\n\n        return False\n\n    def handle_user_exception(\n        self, e: Exception\n    ) -> t.Union[HTTPException, ResponseReturnValue]:\n        \"\"\"This method is called whenever an exception occurs that\n        should be handled. A special case is :class:`~werkzeug\n        .exceptions.HTTPException` which is forwarded to the\n        :meth:`handle_http_exception` method. This function will either\n        return a response value or reraise the exception with the same\n        traceback.\n\n        .. 
versionchanged:: 1.0\n            Key errors raised from request data like ``form`` show the\n            bad key in debug mode rather than a generic bad request\n            message.\n\n        .. versionadded:: 0.7\n        \"\"\"\n        if isinstance(e, BadRequestKeyError) and (\n            self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n        ):\n            e.show_exception = True\n\n        if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n            return self.handle_http_exception(e)\n\n        handler = self._find_error_handler(e)\n\n        if handler is None:\n            raise\n\n        return self.ensure_sync(handler)(e)\n\n    def handle_exception(self, e: Exception) -> Response:\n        \"\"\"Handle an exception that did not have an error handler\n        associated with it, or that was raised from an error handler.\n        This always causes a 500 ``InternalServerError``.\n\n        Always sends the :data:`got_request_exception` signal.\n\n        If :attr:`propagate_exceptions` is ``True``, such as in debug\n        mode, the error will be re-raised so that the debugger can\n        display it. Otherwise, the original exception is logged, and\n        an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n        If an error handler is registered for ``InternalServerError`` or\n        ``500``, it will be used. For consistency, the handler will\n        always receive the ``InternalServerError``. The original\n        unhandled exception is available as ``e.original_exception``.\n\n        .. versionchanged:: 1.1.0\n            Always passes the ``InternalServerError`` instance to the\n            handler, setting ``original_exception`` to the unhandled\n            error.\n\n        .. versionchanged:: 1.1.0\n            ``after_request`` functions and other finalization are done\n            even for the default 500 response when there is no handler.\n\n        .. versionadded:: 0.3\n        \"\"\"\n        exc_info = sys.exc_info()\n        got_request_exception.send(self, exception=e)\n\n        if self.propagate_exceptions:\n            # Re-raise if called with an active exception, otherwise\n            # raise the passed in exception.\n            if exc_info[1] is e:\n                raise\n\n            raise e\n\n        self.log_exception(exc_info)\n        server_error: t.Union[InternalServerError, ResponseReturnValue]\n        server_error = InternalServerError(original_exception=e)\n        handler = self._find_error_handler(server_error)\n\n        if handler is not None:\n            server_error = self.ensure_sync(handler)(server_error)\n\n        return self.finalize_request(server_error, from_error_handler=True)\n\n    def log_exception(\n        self,\n        exc_info: t.Union[\n            t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n        ],\n    ) -> None:\n        \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n        if debugging is disabled and right before the handler is called.\n        The default implementation logs the exception as error on the\n        :attr:`logger`.\n\n        .. versionadded:: 0.8\n        \"\"\"\n        self.logger.error(\n            f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n        )\n\n    def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n        \"\"\"Exceptions that are recorded during routing are reraised with\n        this method. 
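A sketch of the lookup order implemented by ``_find_error_handler`` above (illustrative, not part of this source): because the exception's MRO is walked, a handler for the ``HTTPException`` base class acts as a catch-all, while a handler for a specific code still wins::

    from flask import Flask
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        # Looked up first: a handler for the specific code.
        return "nothing here", 404

    @app.errorhandler(HTTPException)
    def any_http_error(e):
        # Every other HTTP error lands here via the MRO walk.
        return f"HTTP error {e.code}", e.code
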
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
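Returning to ``full_dispatch_request`` above, a minimal sketch (illustrative; the ``MAINTENANCE`` flag is an assumed stand-in): when ``preprocess_request`` returns a non-``None`` value, that value is used as the response and the view is never called::

    from flask import Flask

    app = Flask(__name__)
    MAINTENANCE = False  # assumed flag, toggled by deployment tooling

    @app.before_request
    def maintenance_gate():
        if MAINTENANCE:
            # Short-circuits dispatch_request(); finalize_request()
            # still converts this tuple into a proper response.
            return "Down for maintenance", 503
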
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n            for ``view_rv``:\n\n            ``str``\n                A response object is created with the string encoded to UTF-8\n                as the body.\n\n            ``bytes``\n                A response object is created with the bytes as the body.\n\n            ``dict``\n                A dictionary that will be jsonify'd before being returned.\n\n            ``tuple``\n                Either ``(body, status, headers)``, ``(body, status)``, or\n                ``(body, headers)``, where ``body`` is any of the other types\n                allowed here, ``status`` is a string or an integer, and\n                ``headers`` is a dictionary or a list of ``(key, value)``\n                tuples. If ``body`` is a :attr:`response_class` instance,\n                ``status`` overwrites the existing value and ``headers`` are\n                extended.\n\n            :attr:`response_class`\n                The object is returned unchanged.\n\n            other :class:`~werkzeug.wrappers.Response` class\n                The object is coerced to :attr:`response_class`.\n\n            :func:`callable`\n                The function is called as a WSGI application. The result is\n                used to create a response object.\n\n        .. versionchanged:: 0.9\n            Previously a tuple was interpreted as the arguments for the\n            response object.\n        \"\"\"\n\n        status = headers = None\n\n        # unpack tuple returns\n        if isinstance(rv, tuple):\n            len_rv = len(rv)\n\n            # a 3-tuple is unpacked directly\n            if len_rv == 3:\n                rv, status, headers = rv\n            # decide if a 2-tuple has status or headers\n            elif len_rv == 2:\n                if isinstance(rv[1], (Headers, dict, tuple, list)):\n                    rv, headers = rv\n                else:\n                    rv, status = rv\n            # other sized tuples are not allowed\n            else:\n                raise TypeError(\n                    \"The view function did not return a valid response tuple.\"\n                    \" The tuple must have the form (body, status, headers),\"\n                    \" (body, status), or (body, headers).\"\n                )\n\n        # the body must not be None\n        if rv is None:\n            raise TypeError(\n                f\"The view function for {request.endpoint!r} did not\"\n                \" return a valid response. The function either returned\"\n                \" None or ended without a return statement.\"\n            )\n\n        # make sure the body is an instance of the response class\n        if not isinstance(rv, self.response_class):\n            if isinstance(rv, (str, bytes, bytearray)):\n                # let the response class set the status and headers instead of\n                # waiting to do it manually, so that the class can handle any\n                # special logic\n                rv = self.response_class(rv, status=status, headers=headers)\n                status = headers = None\n            elif isinstance(rv, dict):\n                rv = jsonify(rv)\n            elif isinstance(rv, BaseResponse) or callable(rv):\n                # evaluate a WSGI callable, or coerce a different response\n                # class to the correct type\n                try:\n                    rv = self.response_class.force_type(rv, request.environ)  # type: ignore  # noqa: B950\n                except TypeError as e:\n                    raise TypeError(\n                        f\"{e}\\nThe view function did not return a valid\"\n                        \" response. The return type must be a string,\"\n                        \" dict, tuple, Response instance, or WSGI\"\n                        f\" callable, but it was a {type(rv).__name__}.\"\n                    ).with_traceback(sys.exc_info()[2])\n            else:\n                raise TypeError(\n                    \"The view function did not return a valid\"\n                    \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
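As an aside on the preprocessors just mentioned, a sketch (illustrative, not from this source) pairing a ``url_value_preprocessor`` with a ``url_defaults`` function, the latter being what ``inject_url_defaults`` above invokes during URL building::

    from flask import Flask, g

    app = Flask(__name__)

    @app.url_value_preprocessor
    def pull_lang(endpoint, values):
        # Strip the common value before view args are bound.
        g.lang = (values or {}).pop("lang", "en")

    @app.url_defaults
    def add_lang(endpoint, values):
        # Re-inject it whenever url_for() builds a matching URL.
        values.setdefault("lang", getattr(g, "lang", "en"))

    @app.route("/<lang>/about")
    def about():
        return g.lang
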
Then calls :attr:`before_request_funcs`\n        registered with the app and the blueprint.\n\n        If any :meth:`before_request` handler returns a non-None value, the\n        value is handled as if it was the return value from the view, and\n        further request handling is stopped.\n        \"\"\"\n\n        funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n            None\n        ]\n        for bp in self._request_blueprints():\n            if bp in self.url_value_preprocessors:\n                funcs = chain(funcs, self.url_value_preprocessors[bp])\n        for func in funcs:\n            func(request.endpoint, request.view_args)\n\n        funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n        for bp in self._request_blueprints():\n            if bp in self.before_request_funcs:\n                funcs = chain(funcs, self.before_request_funcs[bp])\n        for func in funcs:\n            rv = self.ensure_sync(func)()\n            if rv is not None:\n                return rv\n\n        return None\n\n    def process_response(self, response: Response) -> Response:\n        \"\"\"Can be overridden in order to modify the response object\n        before it's sent to the WSGI server. By default this will\n        call all the :meth:`after_request` decorated functions.\n\n        .. versionchanged:: 0.5\n            As of Flask 0.5 the functions registered for after request\n            execution are called in reverse order of registration.\n\n        :param response: a :attr:`response_class` object.\n        :return: a new response object or the same, has to be an\n            instance of :attr:`response_class`.\n        \"\"\"\n        ctx = _request_ctx_stack.top\n        funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n        for bp in self._request_blueprints():\n            if bp in self.after_request_funcs:\n                funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n        if None in self.after_request_funcs:\n            funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n        for handler in funcs:\n            response = self.ensure_sync(handler)(response)\n        if not self.session_interface.is_null_session(ctx.session):\n            self.session_interface.save_session(self, ctx.session, response)\n        return response\n\n    def do_teardown_request(\n        self, exc: t.Optional[BaseException] = _sentinel  # type: ignore\n    ) -> None:\n        \"\"\"Called after the request is dispatched and the response is\n        returned, right before the request context is popped.\n\n        This calls all functions decorated with\n        :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n        if a blueprint handled the request. Finally, the\n        :data:`request_tearing_down` signal is sent.\n\n        This is called by\n        :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,\n        which may be delayed during testing to maintain access to\n        resources.\n\n        :param exc: An unhandled exception raised while dispatching the\n            request. Detected from the current exception information if\n            not passed. Passed to each teardown function.\n\n        .. versionchanged:: 0.9\n            Added the ``exc`` argument.\n        \"\"\"\n        if exc is _sentinel:\n            exc = sys.exc_info()[1]\n        funcs: t.Iterable[TeardownCallable] = reversed(\n            self.teardown_request_funcs[None]\n        )\n        for bp in self._request_blueprints():\n            if bp in self.teardown_request_funcs:\n                funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n        for func in funcs:\n            self.ensure_sync(func)(exc)\n        request_tearing_down.send(self, exc=exc)\n\n    def do_teardown_appcontext(\n        self, exc: t.Optional[BaseException] = _sentinel  # type: ignore\n    ) -> None:\n        \"\"\"Called right before the application context is popped.\n\n        When handling a request, the application context is popped\n        after the request context. 
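Tying ``process_response`` and the teardown machinery together, a brief sketch (illustrative, not part of this source)::

    from flask import Flask

    app = Flask(__name__)

    @app.after_request
    def add_header(response):
        # Runs inside process_response(), in reverse registration
        # order, and must return a response object.
        response.headers["X-Example"] = "1"
        return response

    @app.teardown_request
    def cleanup(exc):
        # Receives the unhandled exception, or None (see
        # do_teardown_request above).
        pass
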
See :meth:`do_teardown_request`.\n\n        This calls all functions decorated with\n        :meth:`teardown_appcontext`. Then the\n        :data:`appcontext_tearing_down` signal is sent.\n\n        This is called by\n        :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.\n\n        .. versionadded:: 0.9\n        \"\"\"\n        if exc is _sentinel:\n            exc = sys.exc_info()[1]\n        for func in reversed(self.teardown_appcontext_funcs):\n            self.ensure_sync(func)(exc)\n        appcontext_tearing_down.send(self, exc=exc)\n\n    def app_context(self) -> AppContext:\n        \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n        block to push the context, which will make :data:`current_app`\n        point at this application.\n\n        An application context is automatically pushed by\n        :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`\n        when handling a request, and when running a CLI command. Use\n        this to manually create a context outside of these situations.\n\n        ::\n\n            with app.app_context():\n                init_db()\n\n        See :doc:`/appcontext`.\n\n        .. versionadded:: 0.9\n        \"\"\"\n        return AppContext(self)\n\n    def request_context(self, environ: dict) -> RequestContext:\n        \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n        WSGI environment. Use a ``with`` block to push the context,\n        which will make :data:`request` point at this request.\n\n        See :doc:`/reqcontext`.\n\n        Typically you should not call this from your own code. A request\n        context is automatically pushed by the :meth:`wsgi_app` when\n        handling a request. Use :meth:`test_request_context` to create\n        an environment and context instead of this method.\n\n        :param environ: a WSGI environment\n        \"\"\"\n        return RequestContext(self, environ)\n\n    def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n        \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n        environment created from the given values. This is mostly useful\n        during testing, where you may want to run a function that uses\n        request data without dispatching a full request.\n\n        See :doc:`/reqcontext`.\n\n        Use a ``with`` block to push the context, which will make\n        :data:`request` point at the request for the created\n        environment. ::\n\n            with test_request_context(...):\n                generate_report()\n\n        When using the shell, it may be easier to push and pop the\n        context manually to avoid indentation. ::\n\n            ctx = app.test_request_context(...)\n            ctx.push()\n            ...\n            ctx.pop()\n\n        Takes the same arguments as Werkzeug's\n        :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n        the application. See the linked Werkzeug docs for most of the\n        available arguments. Flask-specific behavior is listed here.\n\n        :param path: URL path being requested.\n        :param base_url: Base URL where the app is being served, which\n            ``path`` is relative to. If not given, built from\n            :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n            :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n        :param subdomain: Subdomain name to append to\n            :data:`SERVER_NAME`.\n        :param url_scheme: Scheme to use instead of\n            :data:`PREFERRED_URL_SCHEME`.\n        :param data: The request body, either as a string or a dict of\n            form keys and values.\n        :param json: If given, this is serialized as JSON and passed as\n            ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 749, "name": "_request_blueprints", "kind": "ref", "category": "function", "info": " for bp in self._request_blueprints():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 754, "name": "func", "kind": "ref", "category": "function", "info": " context.update(func())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 760, "name": "make_shell_context", "kind": "def", "category": "function", "info": " def make_shell_context(self) -> dict:\n \"\"\"Returns the shell context for an interactive shell for this\n application. This runs all the registered shell context\n processors.\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {\"app\": self, \"g\": g}\n for processor in self.shell_context_processors:\n rv.update(processor())\n return rv\n\n #: What environment the app is running in. Flask and extensions may\n #: enable behaviors based on the environment, such as enabling debug\n #: mode. This maps to the :data:`ENV` config key. 
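A sketch of the ``ENV`` behavior described here (illustrative; reliable only when the variable is set before the app is created, as the note above warns)::

    import os

    from flask import Flask

    os.environ["FLASK_ENV"] = "development"  # set before creating the app
    app = Flask(__name__)

    assert app.env == "development"
    assert app.debug  # DEBUG defaults to True when ENV is 'development'
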
This is set by the\n #: :envvar:`FLASK_ENV` environment variable and may not behave as\n #: expected if set in code.\n #:\n #: **Do not enable development when deploying in production.**\n #:\n #: Default: ``'production'``\n env = ConfigAttribute(\"ENV\")\n\n @property\n def debug(self) -> bool:\n \"\"\"Whether debug mode is enabled. When using ``flask run`` to start\n the development server, an interactive debugger will be shown for\n unhandled exceptions, and the server will be reloaded when code\n changes. This maps to the :data:`DEBUG` config key. This is\n enabled when :attr:`env` is ``'development'`` and is overridden\n by the ``FLASK_DEBUG`` environment variable. It may not behave as\n expected if set in code.\n\n **Do not enable debug mode when deploying in production.**\n\n Default: ``True`` if :attr:`env` is ``'development'``, or\n ``False`` otherwise.\n \"\"\"\n return self.config[\"DEBUG\"]\n\n @debug.setter\n def debug(self, value: bool) -> None:\n self.config[\"DEBUG\"] = value\n self.jinja_env.auto_reload = self.templates_auto_reload\n\n def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. 
versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. 
versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
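# Illustrative sketch (annotation): with the async extra installed
# (pip install "flask[async]"), a coroutine view is accepted as-is;
# ensure_sync above wraps it via async_to_sync so a plain WSGI worker
# can call it synchronously.
import asyncio

from flask import Flask

app = Flask(__name__)

@app.route("/slow")
async def slow():
    await asyncio.sleep(0.1)  # any awaitable work
    return "done"  # the return value is handled like a sync view's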
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
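# Illustrative sketch (annotation) of the return types make_response above
# accepts; each view here is converted to a proper Response object.
from flask import Flask

app = Flask(__name__)

@app.route("/text")
def text():
    return "hello"  # str body -> 200 text response

@app.route("/data")
def data():
    return {"ok": True}  # dict -> passed through jsonify

@app.route("/created")
def created():
    # (body, status, headers); (body, status) and (body, headers) also work
    return "made", 201, {"X-Example": "1"}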
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
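# Illustrative sketch (annotation): preprocess_request above runs
# before_request functions (a non-None return replaces the view), and
# process_response runs after_request functions in reverse registration
# order. The token check is a hypothetical example.
from flask import Flask, request

app = Flask(__name__)

@app.before_request
def require_token():
    # Returning a value here short-circuits dispatch for this request.
    if request.path.startswith("/private") and "X-Token" not in request.headers:
        return "forbidden", 403

@app.after_request
def stamp(response):
    response.headers["X-App"] = "demo"
    return response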
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 769, "name": "processor", "kind": "ref", "category": "function", "info": " rv.update(processor())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 781, "name": "ConfigAttribute", "kind": "ref", "category": "function", "info": " env = ConfigAttribute(\"ENV\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 805, "name": "run", "kind": "def", "category": "function", "info": " def run(\n self,\n host: t.Optional[str] = None,\n port: t.Optional[int] = None,\n debug: t.Optional[bool] = None,\n load_dotenv: bool = True,\n **options: t.Any,\n ) -> None:\n \"\"\"Runs the application on a local development server.\n\n Do not use ``run()`` in a production setting. 
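# Illustrative sketch (annotation): manually pushing the application and
# request contexts described above, outside of a real dispatch.
from flask import Flask, current_app, request

app = Flask(__name__)

with app.app_context():
    print(current_app.name)  # current_app now points at `app`

with app.test_request_context("/report?year=2021"):
    print(request.args["year"])  # request points at the fake request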
It is not intended to\n meet security and performance requirements for a production server.\n Instead, see :doc:`/deploying/index` for WSGI server recommendations.\n\n If the :attr:`debug` flag is set the server will automatically reload\n for code changes and show a debugger in case an exception happened.\n\n If you want to run the application in debug mode, but disable the\n code execution on the interactive debugger, you can pass\n ``use_evalex=False`` as parameter. This will keep the debugger's\n traceback screen active, but disable code execution.\n\n It is not recommended to use this function for development with\n automatic reloading as this is badly supported. Instead you should\n be using the :command:`flask` command line script's ``run`` support.\n\n .. admonition:: Keep in Mind\n\n Flask will suppress any server error with a generic error page\n unless it is in debug mode. As such to enable just the\n interactive debugger without the code reloading, you have to\n invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.\n Setting ``use_debugger`` to ``True`` without being in debug mode\n won't catch any exceptions because there won't be any to\n catch.\n\n :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to\n have the server available externally as well. Defaults to\n ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable\n if present.\n :param port: the port of the webserver. Defaults to ``5000`` or the\n port defined in the ``SERVER_NAME`` config variable if present.\n :param debug: if given, enable or disable debug mode. See\n :attr:`debug`.\n :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`\n files to set environment variables. Will also change the working\n directory to the directory containing the first file found.\n :param options: the options to be forwarded to the underlying Werkzeug\n server. See :func:`werkzeug.serving.run_simple` for more\n information.\n\n .. versionchanged:: 1.0\n If installed, python-dotenv will be used to load environment\n variables from :file:`.env` and :file:`.flaskenv` files.\n\n If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG`\n environment variables will override :attr:`env` and\n :attr:`debug`.\n\n Threaded mode is enabled by default.\n\n .. versionchanged:: 0.10\n The default port is now picked from the ``SERVER_NAME``\n variable.\n \"\"\"\n # Change this into a no-op if the server is invoked from the\n # command line. 
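# Illustrative sketch (annotation) of run()'s host/port fallback described
# above: explicit arguments win, then SERVER_NAME, then 127.0.0.1:5000.
# The SERVER_NAME value is only an example.
from flask import Flask

app = Flask(__name__)
app.config["SERVER_NAME"] = "127.0.0.1:8080"

if __name__ == "__main__":
    app.run()  # no host/port passed -> binds 127.0.0.1:8080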
Have a look at cli.py for more information.\n if os.environ.get(\"FLASK_RUN_FROM_CLI\") == \"true\":\n from .debughelpers import explain_ignored_app_run\n\n explain_ignored_app_run()\n return\n\n if get_load_dotenv(load_dotenv):\n cli.load_dotenv()\n\n # if set, let env vars override previous values\n if \"FLASK_ENV\" in os.environ:\n self.env = get_env()\n self.debug = get_debug_flag()\n elif \"FLASK_DEBUG\" in os.environ:\n self.debug = get_debug_flag()\n\n # debug passed to method overrides all other sources\n if debug is not None:\n self.debug = bool(debug)\n\n server_name = self.config.get(\"SERVER_NAME\")\n sn_host = sn_port = None\n\n if server_name:\n sn_host, _, sn_port = server_name.partition(\":\")\n\n if not host:\n if sn_host:\n host = sn_host\n else:\n host = \"127.0.0.1\"\n\n if port or port == 0:\n port = int(port)\n elif sn_port:\n port = int(sn_port)\n else:\n port = 5000\n\n options.setdefault(\"use_reloader\", self.debug)\n options.setdefault(\"use_debugger\", self.debug)\n options.setdefault(\"threaded\", True)\n\n cli.show_server_banner(self.env, self.debug, self.name, False)\n\n from werkzeug.serving import run_simple\n\n try:\n run_simple(t.cast(str, host), port, self, **options)\n finally:\n # reset the first request information if the development server\n # reset normally. This makes it possible to restart the server\n # without reloader and that stuff from an interactive shell.\n self._got_first_request = False\n\n def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. 
versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
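The middleware pattern recommended in the ``wsgi_app`` docstring, as a small sketch (``HeaderMiddleware`` is an assumed example class, not part of Flask)::

    from flask import Flask

    class HeaderMiddleware:
        """WSGI middleware that appends one response header."""

        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            def start(status, headers, exc_info=None):
                headers.append(("X-Wrapped", "yes"))
                return start_response(status, headers, exc_info)

            return self.wsgi_app(environ, start)

    app = Flask(__name__)
    # Wrap wsgi_app instead of rebinding ``app`` so the Flask object
    # keeps its methods (route, test_client, ...).
    app.wsgi_app = HeaderMiddleware(app.wsgi_app)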
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 875, "name": "explain_ignored_app_run", "kind": "ref", "category": "function", "info": " explain_ignored_app_run()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 878, "name": "get_load_dotenv", "kind": "ref", "category": "function", "info": " if get_load_dotenv(load_dotenv):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 879, "name": "load_dotenv", "kind": "ref", "category": "function", "info": " cli.load_dotenv()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 883, "name": "get_env", "kind": "ref", "category": "function", "info": " self.env = get_env()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 884, "name": "get_debug_flag", "kind": "ref", "category": "function", "info": " self.debug = get_debug_flag()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 886, "name": "get_debug_flag", "kind": "ref", "category": "function", "info": " self.debug = get_debug_flag()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 915, "name": "show_server_banner", "kind": "ref", "category": "function", "info": " cli.show_server_banner(self.env, self.debug, self.name, False)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 927, "name": "test_client", "kind": "def", "category": "function", "info": " def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> \"FlaskClient\":\n \"\"\"Creates a test client for this application. For information\n about unit testing head over to :doc:`/testing`.\n\n Note that if you are testing for assertions or exceptions in your\n application code, you must set ``app.testing = True`` in order for the\n exceptions to propagate to the test client. Otherwise, the exception\n will be handled by the application (not visible to the test client) and\n the only indication of an AssertionError or other exception will be a\n 500 status code response to the test client. See the :attr:`testing`\n attribute. For example::\n\n app.testing = True\n client = app.test_client()\n\n The test client can be used in a ``with`` block to defer the closing down\n of the context until the end of the ``with`` block. 
This is useful if\n you want to access the context locals for testing::\n\n with app.test_client() as c:\n rv = c.get('/?vodka=42')\n assert request.args['vodka'] == '42'\n\n Additionally, you may pass optional keyword arguments that will then\n be passed to the application's :attr:`test_client_class` constructor.\n For example::\n\n from flask.testing import FlaskClient\n\n class CustomClient(FlaskClient):\n def __init__(self, *args, **kwargs):\n self._authentication = kwargs.pop(\"authentication\")\n super(CustomClient,self).__init__( *args, **kwargs)\n\n app.test_client_class = CustomClient\n client = app.test_client(authentication='Basic ....')\n\n See :class:`~flask.testing.FlaskClient` for more information.\n\n .. versionchanged:: 0.4\n added support for ``with`` block usage for the client.\n\n .. versionadded:: 0.7\n The `use_cookies` parameter was added as well as the ability\n to override the client to be used by setting the\n :attr:`test_client_class` attribute.\n\n .. versionchanged:: 0.11\n Added `**kwargs` to support passing additional keyword arguments to\n the constructor of :attr:`test_client_class`.\n \"\"\"\n cls = self.test_client_class\n if cls is None:\n from .testing import FlaskClient as cls # type: ignore\n return cls( # type: ignore\n self, self.response_class, use_cookies=use_cookies, **kwargs\n )\n\n def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
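A runnable version of the ``with`` block usage from the ``test_client`` docstring (the route and query values are illustrative)::

    from flask import Flask, request

    app = Flask(__name__)

    @app.route("/")
    def index():
        return request.args.get("vodka", "none")

    app.testing = True  # let exceptions propagate to the test client
    with app.test_client() as c:
        rv = c.get("/?vodka=42")
        assert rv.data == b"42"
        # The request context is kept until the block ends, so the
        # context locals can still be inspected here.
        assert request.args["vodka"] == "42"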
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
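The ``add_url_rule`` logic above is the machinery behind ``@app.route``; a sketch of the equivalence (the view name is illustrative)::

    from flask import Flask

    app = Flask(__name__)

    def index():
        return "hello"

    # Same effect as decorating ``index`` with @app.route("/").
    app.add_url_rule("/", "index", index)
    # With no ``methods`` given and none set on the view, the default
    # is GET, plus the automatic OPTIONS handling described above.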
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
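The template hooks registered above become available in Jinja; a small sketch using only the API shown here plus :func:`render_template_string`::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.template_filter()
    def reverse(s):
        return s[::-1]

    @app.template_global()
    def double(n):
        return 2 * n

    with app.app_context():
        out = render_template_string("{{ 'abc' | reverse }} {{ double(21) }}")
        assert out == "cba 42"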
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
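The lookup order implemented by ``_find_error_handler`` (blueprint before app, specific code before exception class) can be exercised like this; the blueprint and handler names are illustrative::

    from flask import Blueprint, Flask, abort

    app = Flask(__name__)
    bp = Blueprint("api", __name__)

    @bp.route("/item")
    def item():
        abort(404)  # raised inside the blueprint

    @bp.errorhandler(404)
    def bp_not_found(e):
        # Found first: blueprint handler for the specific code.
        return "api: not found", 404

    @app.errorhandler(404)
    def app_not_found(e):
        # Fallback for 404s raised outside the blueprint.
        return "app: not found", 404

    app.register_blueprint(bp, url_prefix="/api")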
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
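A sketch of the ``handle_exception`` contract described above: the handler always receives the ``InternalServerError`` with the unhandled error attached (this assumes ``propagate_exceptions`` is off, i.e. not debug or testing mode)::

    from flask import Flask
    from werkzeug.exceptions import InternalServerError

    app = Flask(__name__)

    @app.errorhandler(InternalServerError)
    def handle_500(e):
        # Since Flask 1.1 the original unhandled exception is
        # available on the InternalServerError instance.
        original = getattr(e, "original_exception", None)
        return f"500 caused by {type(original).__name__}", 500

    @app.route("/boom")
    def boom():
        raise RuntimeError("unexpected")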
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
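The automatic ``OPTIONS`` branch in ``dispatch_request`` can be observed directly (the route and header check are illustrative)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/data", methods=["GET", "POST"])
    def data():
        return "payload"

    with app.test_client() as c:
        rv = c.options("/data")
        # make_default_options_response advertises the allowed methods.
        assert "POST" in rv.headers["Allow"]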
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
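A sketch of the async support wired through ``ensure_sync`` and ``async_to_sync`` (requires the ``flask[async]`` extra for asgiref, as the import check above enforces)::

    import asyncio

    from flask import Flask

    app = Flask(__name__)

    @app.route("/async")
    async def async_view():
        await asyncio.sleep(0)
        return "done"

    # Flask calls ensure_sync on the view internally; doing it by hand
    # shows the coroutine function wrapped for a WSGI worker.
    sync_view = app.ensure_sync(async_view)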
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
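The return types accepted by ``make_response``, seen from the view side (the endpoints are illustrative)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/text")
    def text():
        return "plain body"              # str: UTF-8 encoded body

    @app.route("/json")
    def as_json():
        return {"ok": True}              # dict: passed through jsonify

    @app.route("/tuple")
    def as_tuple():
        # (body, status, headers); (body, status) and (body, headers)
        # also unpack, per the tuple handling above.
        return "created", 201, {"X-Id": "7"}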
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
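When ``create_url_adapter`` is called without a request, it falls back to :data:`SERVER_NAME`, so URLs can be built from a bare app context; a sketch with assumed config values::

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"
    app.config["PREFERRED_URL_SCHEME"] = "https"

    @app.route("/profile")
    def profile():
        return "me"

    with app.app_context():
        # Useful for building external links outside a request,
        # e.g. in emails or background jobs.
        assert url_for("profile", _external=True) == "https://example.com/profile"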
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 981, "name": "cls", "kind": "ref", "category": "function", "info": " return cls( # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 985, "name": "test_cli_runner", "kind": "def", "category": "function", "info": " def test_cli_runner(self, **kwargs: t.Any) -> \"FlaskCliRunner\":\n \"\"\"Create a CLI runner for testing CLI commands.\n See :ref:`testing-cli`.\n\n Returns an instance of :attr:`test_cli_runner_class`, by default\n :class:`~flask.testing.FlaskCliRunner`. The Flask app object is\n passed as the first argument.\n\n .. versionadded:: 1.0\n \"\"\"\n cls = self.test_cli_runner_class\n\n if cls is None:\n from .testing import FlaskCliRunner as cls # type: ignore\n\n return cls(self, **kwargs) # type: ignore\n\n @setupmethod\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. 
Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. 
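A short sketch of ``register_blueprint`` with an option override (the blueprint name and prefix are illustrative)::

    from flask import Blueprint, Flask

    bp = Blueprint("shop", __name__)

    @bp.route("/items")
    def items():
        return "items"

    app = Flask(__name__)
    # Keyword options override blueprint defaults and are exposed on
    # the BlueprintSetupState in record() callbacks.
    app.register_blueprint(bp, url_prefix="/shop")

    with app.test_client() as c:
        assert c.get("/shop/items").data == b"items"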
Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. 
These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. 
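The resource-cleanup pattern that ``teardown_appcontext`` is meant for, as a sketch (the sqlite usage is an assumed example)::

    import sqlite3

    from flask import Flask, g

    app = Flask(__name__)

    def get_db():
        if "db" not in g:
            g.db = sqlite3.connect(":memory:")
        return g.db

    @app.teardown_appcontext
    def close_db(exc):
        # Runs whenever the app context is popped; ``exc`` is the
        # unhandled exception, if any. Return values are ignored.
        db = g.pop("db", None)
        if db is not None:
            db.close()

    with app.app_context():
        get_db().execute("select 1")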
This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. 
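The trapping behavior described above is driven by the config keys named in the docstring; a minimal sketch (enabling this outside of debugging sessions is usually not desirable)::

    from flask import Flask, abort

    app = Flask(__name__)
    # With trapping on, HTTP exceptions bypass their error handlers
    # and surface as ordinary tracebacks, which helps when debugging
    # implicitly raised errors such as BadRequestKeyError.
    app.config["TRAP_HTTP_EXCEPTIONS"] = True

    @app.route("/missing")
    def missing():
        abort(404)  # now reaches the traceback instead of a handler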
This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
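To see ``ensure_sync`` in action, an ``async def`` view works unchanged once the ``async`` extra is installed; a minimal sketch::

    # pip install "flask[async]"  (provides asgiref)
    import asyncio
    from flask import Flask

    app = Flask(__name__)

    @app.route("/ping")
    async def ping():
        # ensure_sync() detects the coroutine function and wraps it
        # via async_to_sync() so a WSGI worker can call it synchronously.
        await asyncio.sleep(0.1)
        return "pong"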
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
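The accepted shapes are easiest to see side by side; a sketch of equivalent view returns (the endpoint name is illustrative)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/demo")
    def demo():
        # Each commented-out line is an equally valid return value:
        # return "hello"                        # str body, status 200
        # return {"ok": True}                   # dict, passed to jsonify()
        # return "created", 201                 # (body, status)
        # return "hello", {"X-Demo": "1"}       # (body, headers)
        return "created", 201, {"X-Demo": "1"}  # (body, status, headers)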
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
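A handler appended to ``url_build_error_handlers`` can recover from a failed ``url_for``; a sketch with a hypothetical ``legacy`` endpoint::

    from flask import Flask
    from werkzeug.routing import BuildError

    app = Flask(__name__)

    def fallback(error: BuildError, endpoint: str, values: dict):
        # Returning a string becomes the url_for() result; returning
        # None lets handle_url_build_error() try the next handler.
        if endpoint == "legacy":
            return "/legacy"
        return None

    app.url_build_error_handlers.append(fallback)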
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
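The short-circuit behavior of ``before_request`` and the reverse ordering of ``after_request`` look like this in practice; a sketch with a hypothetical token check::

    from flask import Flask, request

    app = Flask(__name__)

    @app.before_request
    def require_token():
        # A non-None return stops dispatch; the value is finalized as
        # if the view had returned it (see preprocess_request above).
        if request.headers.get("X-Token") != "secret":
            return "forbidden", 403

    @app.after_request
    def stamp(response):
        # Runs in reverse registration order; must return a response.
        response.headers["X-Stamp"] = "1"
        return response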
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
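A quick sketch of pushing both kinds of context manually, e.g. in a test or a script::

    from flask import Flask, current_app, request

    app = Flask(__name__)

    with app.app_context():
        # current_app now points at `app`.
        assert current_app.name == app.name

    with app.test_request_context("/search?q=flask"):
        # `request` is built from the given values.
        assert request.path == "/search"
        assert request.args["q"] == "flask"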
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1000, "name": "cls", "kind": "ref", "category": "function", "info": " return cls(self, **kwargs) # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1003, "name": "register_blueprint", "kind": "def", "category": "function", "info": " def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on the application. Keyword\n arguments passed to this method will override the defaults set on the\n blueprint.\n\n Calls the blueprint's :meth:`~flask.Blueprint.register` method after\n recording the blueprint in the application's :attr:`blueprints`.\n\n :param blueprint: The blueprint to register.\n :param url_prefix: Blueprint routes will be prefixed with this.\n :param subdomain: Blueprint routes will match on this subdomain.\n :param url_defaults: Blueprint routes will use these default values for\n view arguments.\n :param options: Additional keyword arguments are passed to\n :class:`~flask.blueprints.BlueprintSetupState`. 
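The recommended wrapping of ``wsgi_app`` might look like this; ``LoggingMiddleware`` is a made-up example, not part of Flask::

    from flask import Flask

    app = Flask(__name__)

    class LoggingMiddleware:
        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            # The app object itself stays reachable as `app`.
            print("incoming:", environ.get("PATH_INFO"))
            return self.wsgi_app(environ, start_response)

    app.wsgi_app = LoggingMiddleware(app.wsgi_app)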
They can be\n accessed in :meth:`~flask.Blueprint.record` callbacks.\n\n .. versionadded:: 0.7\n \"\"\"\n blueprint.register(self, options)\n\n def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. 
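For reference, ``add_url_rule`` is the imperative form of the ``route`` decorator, and keyword options to ``register_blueprint`` override the blueprint's own defaults; a small sketch::

    from flask import Blueprint, Flask

    app = Flask(__name__)

    def index():
        return "index"

    # Equivalent to @app.route("/"); methods default to ("GET",) and
    # OPTIONS is provided automatically unless disabled on the view.
    app.add_url_rule("/", "index", index)

    admin = Blueprint("admin", __name__)
    app.register_blueprint(admin, url_prefix="/admin")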
Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
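A sketch combining a named template filter with an app-context teardown (the ``g.db`` resource is hypothetical)::

    from flask import Flask, g

    app = Flask(__name__)

    @app.template_filter("shout")
    def shout(s):
        # Usable in templates as {{ name|shout }}.
        return s.upper() + "!"

    @app.teardown_appcontext
    def close_db(exc):
        # `exc` is the unhandled exception, if any; return value ignored.
        db = g.pop("db", None)
        if db is not None:
            db.close()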
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
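The lookup order in ``_find_error_handler`` means a handler registered for a base class acts as a catch-all; a sketch::

    from flask import Flask
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        # A code-specific handler is checked before class-based ones.
        return "nothing here", 404

    @app.errorhandler(HTTPException)
    def any_http_error(e):
        # Catches the remaining subclasses via the MRO walk.
        return f"{e.code} {e.name}", e.code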
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1022, "name": "register", "kind": "ref", "category": "function", "info": " blueprint.register(self, options)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1024, "name": "iter_blueprints", "kind": "def", "category": "function", "info": " def iter_blueprints(self) -> t.ValuesView[\"Blueprint\"]:\n \"\"\"Iterates over all blueprints by the order they were registered.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.blueprints.values()\n\n @setupmethod\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as a response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
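A hedged sketch of the ``handle_exception`` contract described in the docstring above: a handler registered for 500 always receives the ``InternalServerError``, with the unhandled exception attached as ``original_exception`` (app and handler names are illustrative):

from flask import Flask

app = Flask(__name__)

@app.errorhandler(500)
def handle_500(e):
    # e is the InternalServerError; the original unhandled exception,
    # if any, is available as e.original_exception.
    original = getattr(e, "original_exception", None)
    return {"error": repr(original or e)}, 500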
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
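To illustrate the ``full_dispatch_request`` flow above: if ``preprocess_request`` (i.e. a ``before_request`` function) returns a non-``None`` value, ``dispatch_request`` is skipped and that value becomes the response. A minimal sketch; the header name is an assumption:

from flask import Flask, request

app = Flask(__name__)

@app.before_request
def require_token():
    # Returning a value here short-circuits dispatch; returning None
    # lets the request continue on to the view.
    if request.headers.get("X-Token") != "secret":
        return "forbidden", 403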
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
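As the ``ensure_sync``/``async_to_sync`` docstrings above describe, ``async def`` views are wrapped to run synchronously under WSGI; this requires Flask installed with the ``async`` extra (asgiref). A sketch, with ``fetch_payload`` as a hypothetical coroutine:

import asyncio
from flask import Flask

app = Flask(__name__)

async def fetch_payload():
    await asyncio.sleep(0)  # stand-in for real async work
    return {"ok": True}

@app.route("/data")
async def get_data():
    # ensure_sync() wraps this coroutine function for WSGI workers.
    return await fetch_payload()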
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
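One of the tuple forms accepted by ``make_response`` (documented above), shown in practice — a minimal sketch:

from flask import Flask

app = Flask(__name__)

@app.route("/ping")
def ping():
    # (body, status, headers): the dict body is jsonify'd, the status
    # overrides the default 200, and the headers extend the response.
    return {"ok": True}, 201, {"X-Served-By": "sketch"}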
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
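A short sketch of the ``do_teardown_request`` contract above: teardown functions receive the unhandled exception (or ``None``) and their return values are ignored:

from flask import Flask

app = Flask(__name__)

@app.teardown_request
def cleanup(exc):
    # exc is the unhandled exception if one occurred, else None.
    if exc is not None:
        app.logger.warning("request failed: %r", exc)
    # resource cleanup (e.g. closing a connection) would go here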
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
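Using ``test_request_context`` as documented above, e.g. to exercise request-dependent code without dispatching a full request (path and payload are illustrative):

from flask import Flask, request

app = Flask(__name__)

with app.test_request_context("/profile", method="POST", json={"name": "a"}):
    # `request` now points at the built environment.
    assert request.path == "/profile"
    assert request.get_json()["name"] == "a"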
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1032, "name": "add_url_rule", "kind": "def", "category": "function", "info": " def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n provide_automatic_options: t.Optional[bool] = None,\n **options: t.Any,\n ) -> None:\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n options[\"endpoint\"] = endpoint\n methods = options.pop(\"methods\", None)\n\n # if the methods are not given and the view_func object knows its\n # methods we can use that instead. 
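Stepping back to the ``wsgi_app`` docstring earlier in this entry, the recommended wrapping pattern keeps the app object intact. A minimal sketch; the middleware class is illustrative:

from flask import Flask

app = Flask(__name__)

class LoggingMiddleware:
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        print("handling", environ.get("PATH_INFO"))
        return self.wsgi_app(environ, start_response)

# Wrap wsgi_app, not the app itself, so `app` keeps its methods.
app.wsgi_app = LoggingMiddleware(app.wsgi_app)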
If neither exists, we go with\n # a tuple of only ``GET`` as default.\n if methods is None:\n methods = getattr(view_func, \"methods\", None) or (\"GET\",)\n if isinstance(methods, str):\n raise TypeError(\n \"Allowed methods must be a list of strings, for\"\n ' example: @app.route(..., methods=[\"POST\"])'\n )\n methods = {item.upper() for item in methods}\n\n # Methods that should always be added\n required_methods = set(getattr(view_func, \"required_methods\", ()))\n\n # starting with Flask 0.8 the view_func object can disable and\n # force-enable the automatic options handling.\n if provide_automatic_options is None:\n provide_automatic_options = getattr(\n view_func, \"provide_automatic_options\", None\n )\n\n if provide_automatic_options is None:\n if \"OPTIONS\" not in methods:\n provide_automatic_options = True\n required_methods.add(\"OPTIONS\")\n else:\n provide_automatic_options = False\n\n # Add the required methods now.\n methods |= required_methods\n\n rule = self.url_rule_class(rule, methods=methods, **options)\n rule.provide_automatic_options = provide_automatic_options # type: ignore\n\n self.url_map.add(rule)\n if view_func is not None:\n old_func = self.view_functions.get(endpoint)\n if old_func is not None and old_func != view_func:\n raise AssertionError(\n \"View function mapping is overwriting an existing\"\n f\" endpoint function: {endpoint}\"\n )\n self.view_functions[endpoint] = view_func\n\n @setupmethod\n def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as a response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as an error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
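Because ``_find_error_handler`` above walks the exception's MRO, a catch-all handler for ``HTTPException`` also receives subclasses such as ``NotFound``, unless a more specific handler is registered. A minimal sketch:

from flask import Flask
from werkzeug.exceptions import HTTPException

app = Flask(__name__)

@app.errorhandler(HTTPException)
def handle_http(e):
    # Receives any HTTPException subclass without its own handler.
    return {"code": e.code, "name": e.name}, e.code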
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
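The ``try_trigger_before_first_request_functions`` call in the dispatch path above runs ``before_first_request`` callbacks exactly once per process. A sketch of registering one:

from flask import Flask

app = Flask(__name__)

@app.before_first_request
def warm_up():
    # Runs once, before the first request this process handles;
    # the return value is ignored.
    app.logger.info("first request incoming")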
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
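``should_ignore_error`` above is an overridable hook: returning ``True`` hides the error from teardown functions. A subclass sketch under that assumption (class name and error type are illustrative):

from flask import Flask

class QuietFlask(Flask):
    def should_ignore_error(self, error):
        # Hide a specific error type from teardown handlers.
        return isinstance(error, KeyError)

app = QuietFlask(__name__)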
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
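A sketch of the ``handle_url_build_error`` hook above: handlers appended to ``url_build_error_handlers`` may resolve a failed ``url_for`` by returning a string, or defer by returning ``None`` (the fallback URL is illustrative):

from flask import Flask

app = Flask(__name__)

def fall_back(error, endpoint, values):
    # Returning None here would let the BuildError propagate instead.
    return "/missing"

app.url_build_error_handlers.append(fall_back)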
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
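As ``process_response`` above notes, ``after_request`` functions run in reverse order of registration and must return the (possibly new) response. A minimal sketch:

from flask import Flask

app = Flask(__name__)

@app.after_request
def add_header(response):
    # Runs after the view; must return a response object.
    response.headers["X-Frame-Options"] = "DENY"
    return response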
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
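[Example] A minimal sketch of ``app_context`` and ``test_request_context`` as described above; the path and JSON payload are made up for illustration::

    from flask import Flask, request

    app = Flask(__name__)

    # App context: makes current_app and g available without a request.
    with app.app_context():
        app.logger.info("one-off setup")

    # Request context built from test values; ``json`` sets the body and
    # defaults the content type to application/json.
    with app.test_request_context("/hello", method="POST", json={"name": "x"}):
        assert request.method == "POST"
        assert request.get_json()["name"] == "x"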
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1041, "name": "_endpoint_from_view_func", "kind": "ref", "category": "function", "info": " endpoint = _endpoint_from_view_func(view_func) # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1077, "name": "url_rule_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1091, "name": "template_filter", "kind": "def", "category": "function", "info": " def template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template filter.\n You can specify a name for the filter, otherwise the function\n name will be used. 
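[Example] A minimal sketch of the middleware pattern the ``wsgi_app`` docstring recommends; ``LoggingMiddleware`` is a hypothetical WSGI wrapper used only for illustration::

    from flask import Flask

    app = Flask(__name__)

    class LoggingMiddleware:
        # Hypothetical pass-through WSGI middleware.
        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            print("request path:", environ.get("PATH_INFO"))
            return self.wsgi_app(environ, start_response)

    # Wrap wsgi_app, not the app object, so ``app``'s methods stay usable.
    app.wsgi_app = LoggingMiddleware(app.wsgi_app)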
Example::\n\n @app.template_filter()\n def reverse(s):\n return s[::-1]\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. 
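[Example] A minimal sketch of the two filter-registration forms shown above (decorator and ``add_template_filter``); the filter names are made up, and ``backwards`` is chosen to avoid shadowing Jinja's built-in ``reverse`` filter::

    from flask import Flask

    app = Flask(__name__)

    @app.template_filter("backwards")
    def backwards_filter(s):
        return s[::-1]

    def shout(s):
        return s.upper() + "!"

    # Non-decorator form; the name defaults to the function name.
    app.add_template_filter(shout)

    print(app.jinja_env.from_string("{{ 'abc' | backwards | shout }}").render())
    # -> CBA!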
These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. 
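[Example] A minimal sketch of the MRO-based lookup ``handle_http_exception`` documents: a handler registered for the base ``HTTPException`` acts as a catch-all for its subclasses::

    from flask import Flask, jsonify
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    # Handlers are looked up by status code *and* by MRO, so this
    # catches every HTTPException subclass without its own handler.
    @app.errorhandler(HTTPException)
    def handle_http_error(e):
        return jsonify(error=e.name, code=e.code), e.code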
This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. 
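[Example] A minimal sketch of the ``handle_exception`` contract described above: a 500 handler always receives the ``InternalServerError``, with the unhandled error on ``original_exception``::

    from flask import Flask

    app = Flask(__name__)

    @app.errorhandler(500)
    def handle_500(e):
        # ``e`` is the InternalServerError; the original unhandled
        # exception is available as ``original_exception``.
        original = getattr(e, "original_exception", None)
        return f"internal error ({type(original).__name__})", 500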
This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
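[Example] A minimal sketch, under the assumption that subclassing is acceptable, of overriding ``log_exception`` (same ``(self, exc_info)`` signature as above) to log at WARNING instead of ERROR; ``QuietFlask`` is a hypothetical name::

    from flask import Flask, request

    class QuietFlask(Flask):
        # Hypothetical subclass: downgrade unhandled-exception logging.
        def log_exception(self, exc_info):
            self.logger.warning(
                "Exception on %s [%s]", request.path, request.method,
                exc_info=exc_info,
            )

    app = QuietFlask(__name__)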
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
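[Example] A minimal sketch of an ``async def`` view, which Flask runs through ``ensure_sync``/``async_to_sync`` as described above; per ``async_to_sync``, this assumes Flask was installed with the ``async`` extra (``pip install "flask[async]"``)::

    import asyncio
    from flask import Flask

    app = Flask(__name__)

    @app.route("/slow")
    async def slow():
        await asyncio.sleep(0.1)
        return {"status": "done"}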
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
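[Example] A minimal sketch of the return types ``make_response`` accepts, per the list above; routes and values are made up::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/text")
    def text():
        return "hello"                       # str -> UTF-8 body

    @app.route("/data")
    def data():
        return {"ok": True}                  # dict -> jsonify'd

    @app.route("/created")
    def created():
        # 3-tuple (body, status, headers); a 2-tuple is read as
        # (body, status) or (body, headers) based on the second item.
        return "created", 201, {"X-Id": "42"}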
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
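[Example] A minimal sketch of the no-request branch of ``create_url_adapter`` above: with :data:`SERVER_NAME` set, URLs can be built from a bare app context; the host name is hypothetical::

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"      # hypothetical host
    app.config["PREFERRED_URL_SCHEME"] = "https"

    @app.route("/about")
    def about():
        return "about"

    # The URL map is bound from config, not from a request environ.
    with app.app_context():
        print(url_for("about", _external=True))    # https://example.com/about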
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
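[Example] A minimal sketch of what ``process_response`` above drives: ``after_request`` functions run (in reverse registration order) and then the session is saved unless it is a null session; the secret key and route are placeholders::

    from flask import Flask, session

    app = Flask(__name__)
    app.secret_key = "dev"  # hypothetical; sessions need a secret key

    @app.after_request
    def add_header(response):
        # Must return a response object (the same one or a new one).
        response.headers["X-Processed"] = "yes"
        return response

    @app.route("/visit")
    def visit():
        # Persisted by the session interface inside process_response().
        session["visits"] = session.get("visits", 0) + 1
        return str(session["visits"])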
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
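[Example] A minimal sketch of the manual push/pop style the ``test_request_context`` docstring suggests for shell use; the path and query string are made up::

    from flask import Flask, request

    app = Flask(__name__)

    ctx = app.test_request_context("/ping?debug=1")
    ctx.push()
    try:
        assert request.args["debug"] == "1"
    finally:
        ctx.pop()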
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1104, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_template_filter(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
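[Example] A minimal sketch of subscribing to the :data:`got_request_exception` signal, which ``handle_exception`` above always sends; signal support assumes the ``blinker`` package is installed, and the subscriber name is hypothetical::

    from flask import Flask, got_request_exception

    app = Flask(__name__)

    def record_exception(sender, exception, **extra):
        # Sent for every unhandled exception, before propagation or the
        # 500 response is decided.
        sender.logger.error("unhandled: %r", exception)

    got_request_exception.connect(record_exception, app)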
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
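[Example] A minimal sketch of the automatic ``OPTIONS`` handling in ``dispatch_request`` above: Flask answers ``OPTIONS`` itself and advertises the rule's allowed methods; the route is made up::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/item", methods=["GET", "POST"])
    def item():
        return "item"

    # No OPTIONS handler is defined; make_default_options_response()
    # fills in the Allow header for the matched rule.
    with app.test_client() as client:
        resp = client.options("/item")
        assert "POST" in resp.headers["Allow"]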
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
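[Example] A minimal sketch of ``before_first_request``, which ``try_trigger_before_first_request_functions`` above runs exactly once per application instance; the function body is a placeholder::

    from flask import Flask

    app = Flask(__name__)

    @app.before_first_request
    def warm_up():
        # Called with no arguments before the first request is
        # dispatched; the return value is ignored.
        app.logger.info("first request coming up")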
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
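[Example] A minimal sketch of calling the conversion described above explicitly via :func:`flask.make_response`, so a view can adjust the response before returning it; the cookie name is made up::

    from flask import Flask, make_response

    app = Flask(__name__)

    @app.route("/cookie")
    def cookie():
        # Accepts the same return shapes as a view: here (body, status).
        resp = make_response({"ok": True}, 200)
        resp.set_cookie("seen", "1")
        return resp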
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
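To make the accepted return shapes concrete, here is a small sketch of views exercising the branches of ``make_response`` above (string body, dict body, and the 3-tuple/2-tuple forms); the route names and values are illustrative only:

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    @app.route("/text")
    def text():
        # str body -> response_class(body), encoded as UTF-8
        return "hello"

    @app.route("/json")
    def as_json():
        # dict -> jsonify(rv)
        return {"user": "adam"}

    @app.route("/created")
    def created():
        # 3-tuple -> (body, status, headers)
        return "created", 201, {"Location": "/things/1"}

    @app.route("/pair")
    def pair():
        # 2-tuple: the second item is a dict/list/tuple/Headers, so
        # it is taken as headers; an int or str second item would be
        # taken as the status instead
        return "ok", {"X-Trace": "abc123"}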
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
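A short sketch of the hook registrations that ``preprocess_request``, ``process_response``, and ``do_teardown_request`` drive; names like ``load_user`` are invented for illustration:

.. code-block:: python

    from flask import Flask, g

    app = Flask(__name__)

    @app.before_request
    def load_user():
        # called by preprocess_request(); returning a non-None value
        # here would be treated as the view's return value and stop
        # further dispatching
        g.user = "anonymous"

    @app.after_request
    def stamp_response(response):
        # called by process_response(), in reverse registration
        # order; must return a response object
        response.headers["X-User"] = g.user
        return response

    @app.teardown_request
    def cleanup(exc):
        # called by do_teardown_request(); exc is the unhandled
        # exception or None, and the return value is ignored
        pass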
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
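For example, a request context can be created and pushed without any server involved, which is how ``test_request_context`` is typically used in tests; a minimal sketch:

.. code-block:: python

    from flask import Flask, request

    app = Flask(__name__)

    with app.test_request_context("/search?q=flask", method="GET"):
        # `request` now points at a request built by Werkzeug's
        # EnvironBuilder from the given values; nothing is dispatched
        assert request.path == "/search"
        assert request.args["q"] == "flask"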
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1105, "name": "add_template_filter", "kind": "ref", "category": "function", "info": " self.add_template_filter(f, name=name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1111, "name": "add_template_filter", "kind": "def", "category": "function", "info": " def add_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter. Works exactly like the\n :meth:`template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.filters[name or f.__name__] = f\n\n @setupmethod\n def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. 
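The wrapping pattern recommended in the ``wsgi_app`` docstring, sketched with a toy middleware (``LowercasePathMiddleware`` is invented purely for illustration):

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    class LowercasePathMiddleware:
        """Toy WSGI middleware: normalizes the request path."""

        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            environ["PATH_INFO"] = environ.get("PATH_INFO", "").lower()
            return self.wsgi_app(environ, start_response)

    # Wrap wsgi_app rather than rebinding `app`, so the Flask object
    # keeps its methods (route(), test_client(), config, ...) while
    # every request still flows through the middleware.
    app.wsgi_app = LowercasePathMiddleware(app.wsgi_app)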
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
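Taken together, the filter/test/global registrations look like this in use; a sketch that checks the result with ``render_template_string`` under an application context (the names ``shout``, ``is_empty``, and ``double`` are illustrative):

.. code-block:: python

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.template_filter("shout")
    def shout(s):
        return s.upper() + "!"

    @app.template_test("empty")
    def is_empty(seq):
        return len(seq) == 0

    @app.template_global()
    def double(n):
        return 2 * n

    with app.app_context():
        print(render_template_string(
            "{{ 'hi' | shout }} {{ double(3) }} {{ [] is empty }}"
        ))
        # -> HI! 6 True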
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
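The lookup order in ``_find_error_handler`` (specific code before exception class, walking the class MRO) means a catch-all ``HTTPException`` handler can coexist with specific ones; a hedged sketch:

.. code-block:: python

    from flask import Flask, jsonify
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        # found first for 404s: code-specific handlers win
        return jsonify(error="not found"), 404

    @app.errorhandler(HTTPException)
    def generic_http_error(e):
        # reached via the MRO walk for any other HTTPException
        # subclass (e.g. 400, 405) without a more specific handler
        return jsonify(error=e.name, code=e.code), e.code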
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
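A sketch of the ``InternalServerError`` handler contract described in ``handle_exception``; this only runs when exceptions are not propagated (e.g. debug mode off), and the unhandled error arrives on ``e.original_exception`` (``None`` when a 500 was raised directly):

.. code-block:: python

    from flask import Flask
    from werkzeug.exceptions import InternalServerError

    app = Flask(__name__)

    @app.errorhandler(InternalServerError)
    def on_500(e):
        # handle_exception() always passes the InternalServerError
        # instance; the original unhandled exception rides along
        original = e.original_exception
        cause = type(original).__name__ if original else "direct 500"
        return f"server error ({cause})", 500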
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
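The automatic ``OPTIONS`` branch in ``dispatch_request`` can be observed with the test client; a minimal sketch (passing ``provide_automatic_options=False`` to ``route()`` would disable it for a rule):

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    @app.route("/items", methods=["GET", "POST"])
    def items():
        return "items"

    client = app.test_client()
    resp = client.open("/items", method="OPTIONS")
    # dispatch_request() never calls the view; it answers with
    # make_default_options_response(), whose Allow header lists the
    # methods the URL adapter reports for this rule
    print(sorted(resp.allow))  # ['GET', 'HEAD', 'OPTIONS', 'POST']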
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
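Since ``create_url_adapter`` can also bind without a request when :data:`SERVER_NAME` is set, external URLs can be built from a bare application context; a sketch, assuming an ``example.com`` server name:

.. code-block:: python

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"
    app.config["PREFERRED_URL_SCHEME"] = "https"

    @app.route("/profile/<name>")
    def profile(name):
        return name

    with app.app_context():
        # no request: create_url_adapter(None) falls back to
        # url_map.bind(SERVER_NAME, ...) using the config values
        print(url_for("profile", name="adam", _external=True))
        # -> https://example.com/profile/adam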
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
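The ``init_db`` pattern from the ``app_context`` docstring, spelled out; ``init_db`` here is a stand-in that just reads a config value via :data:`current_app`:

.. code-block:: python

    from flask import Flask, current_app

    app = Flask(__name__)
    app.config["DATABASE"] = "app.db"

    def init_db():
        # any code that needs current_app must run inside a pushed
        # application context
        print("initializing", current_app.config["DATABASE"])

    with app.app_context():
        init_db()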
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1123, "name": "template_test", "kind": "def", "category": "function", "info": " def template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register custom template test.\n You can specify a name for the test, otherwise the function\n name will be used. Example::\n\n @app.template_test()\n def is_prime(n):\n if n == 2:\n return True\n for i in range(2, int(math.ceil(math.sqrt(n))) + 1):\n if n % i == 0:\n return False\n return True\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1143, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_template_test(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. 
versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. 
The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. 
versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. 
versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1144, "name": "add_template_test", "kind": "ref", "category": "function", "info": " self.add_template_test(f, name=name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1150, "name": "add_template_test", "kind": "def", "category": "function", "info": " def add_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test. Works exactly like the\n :meth:`template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.tests[name or f.__name__] = f\n\n @setupmethod\n def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. 
versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. 
The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. 
versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. 
versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
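As a usage sketch for the ``url_build_error_handlers`` list consumed by :meth:`handle_url_build_error` above; the ``docs`` endpoint and the fallback URL are hypothetical:

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    def docs_fallback(error, endpoint, values):
        # Hypothetical fallback: resolve an unknown "docs" endpoint to
        # an external URL instead of letting BuildError propagate.
        # Returning None falls through to the next handler, and the
        # original error is re-raised if no handler returns a value.
        if endpoint == "docs":
            return "https://example.com/docs"
        return None

    app.url_build_error_handlers.append(docs_fallback)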
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
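A minimal sketch of such a teardown hook, assuming a hypothetical ``db`` resource that some view stored on ``g``:

.. code-block:: python

    from flask import Flask, g

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_db(exc):
        # exc is the unhandled exception, or None on success; the
        # return value of a teardown function is ignored either way.
        db = g.pop("db", None)  # hypothetical resource stored by a view
        if db is not None:
            db.close()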
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1164, "name": "template_global", "kind": "def", "category": "function", "info": " def template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"A decorator that is used to register a custom template global function.\n You can specify a name for the global function, otherwise the function\n name will be used. Example::\n\n @app.template_global()\n def double(n):\n return 2 * n\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n
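To make the MRO lookup in ``_find_error_handler`` concrete, a small sketch; only the Flask and Werkzeug imports are real API, the handler body is made up:

.. code-block:: python

    from flask import Flask
    from werkzeug.exceptions import HTTPException

    app = Flask(__name__)

    @app.errorhandler(HTTPException)
    def generic_http_error(e):
        # Lookup walks the exception's MRO, so this catch-all also
        # handles subclasses such as NotFound (404), unless a more
        # specific handler is registered for that class or status code.
        return f"{e.code} {e.name}", e.code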
They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. 
versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. 
versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. 
versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1179, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_template_global(f, name=name)\n return f\n\n return decorator\n\n @setupmethod\n def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. 
versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. 
It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. 
versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. 
versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. 
versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1180, "name": "add_template_global", "kind": "ref", "category": "function", "info": " self.add_template_global(f, name=name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1186, "name": "add_template_global", "kind": "def", "category": "function", "info": " def add_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global function. Works exactly like the\n :meth:`template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global function, otherwise the\n function name will be used.\n \"\"\"\n self.jinja_env.globals[name or f.__name__] = f\n\n @setupmethod\n def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. 
versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. 
It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. 
versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. 
versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. 
versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1200, "name": "before_first_request", "kind": "def", "category": "function", "info": " def before_first_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Registers a function to be run before the first request to this\n instance of the application.\n\n The function will be called without any arguments and its return\n value is ignored.\n\n .. versionadded:: 0.8\n \"\"\"\n self.before_first_request_funcs.append(f)\n return f\n\n @setupmethod\n def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. 
This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as a regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. 
versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. 
versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
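# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: an ``async def`` view of the
# kind ensure_sync()/async_to_sync() above exist to support. Requires the
# extra dependency: pip install "flask[async]" (which provides asgiref).
import asyncio
from flask import Flask

app = Flask(__name__)

@app.route("/slow")
async def slow():
    # ensure_sync() wraps this coroutine function so a WSGI worker can run it.
    await asyncio.sleep(0.1)
    return "done"
# ---------------------------------------------------------------------------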
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
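# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: the accepted view return types
# documented for make_response() above, assuming an app named ``app``.
from flask import Flask

app = Flask(__name__)

@app.route("/text")
def text():
    return "plain body"                          # str -> body, status 200

@app.route("/json")
def json_view():
    return {"ok": True}                          # dict -> passed to jsonify()

@app.route("/tuple")
def tuple_view():
    # (body, status, headers); (body, status) and (body, headers) also work.
    return "created", 201, {"X-Request-Id": "abc123"}
# ---------------------------------------------------------------------------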
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
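# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: create_url_adapter() above only
# binds without a request when SERVER_NAME is set, which is what lets
# url_for() build external URLs from a bare app context. The config values
# here are made up for the example.
from flask import Flask, url_for

app = Flask(__name__)
app.config["SERVER_NAME"] = "example.com"
app.config["PREFERRED_URL_SCHEME"] = "https"

@app.route("/about")
def about():
    return "about"

with app.app_context():
    print(url_for("about", _external=True))  # https://example.com/about
# ---------------------------------------------------------------------------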
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
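# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: the hooks run by
# preprocess_request() and process_response() above. A before_request handler
# returning non-None short-circuits dispatch; after_request handlers run in
# reverse registration order. The token check is a made-up example.
from flask import Flask, request

app = Flask(__name__)

@app.before_request
def require_token():
    if request.headers.get("X-Token") != "secret":
        return {"error": "unauthorized"}, 401  # the view is never called

@app.after_request
def stamp(response):
    response.headers["X-Processed"] = "yes"
    return response
# ---------------------------------------------------------------------------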
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
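# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: the classic resource-cleanup
# pattern behind do_teardown_appcontext() above, using ``g`` and a stand-in
# for a real database connection.
import io
from flask import Flask, g

app = Flask(__name__)

def get_conn():
    if "conn" not in g:
        g.conn = io.StringIO()  # stand-in for a real connection object
    return g.conn

@app.teardown_appcontext
def close_conn(exc):
    # exc is the unhandled exception, or None; the return value is ignored.
    conn = g.pop("conn", None)
    if conn is not None:
        conn.close()
# ---------------------------------------------------------------------------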
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1213, "name": "teardown_appcontext", "kind": "def", "category": "function", "info": " def teardown_appcontext(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Registers a function to be called when the application context\n ends. These functions are typically also called when the request\n context is popped.\n\n Example::\n\n ctx = app.app_context()\n ctx.push()\n ...\n ctx.pop()\n\n When ``ctx.pop()`` is executed in the above example, the teardown\n functions are called just before the app context moves from the\n stack of active contexts. This becomes relevant if you are using\n such constructs in tests.\n\n Since a request context typically also manages an application\n context it would also be called when you pop a request context.\n\n When a teardown function was called because of an unhandled exception\n it will be passed an error object. If an :meth:`errorhandler` is\n registered, it will handle the exception and the teardown will not\n receive it.\n\n The return values of teardown functions are ignored.\n\n .. 
versionadded:: 0.9\n \"\"\"\n self.teardown_appcontext_funcs.append(f)\n return f\n\n @setupmethod\n def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. 
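# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: the config keys consulted by
# trap_http_exception() above. Trapping is mostly useful in tests, where the
# original traceback is more helpful than a rendered error page.
from flask import Flask, abort

app = Flask(__name__)
app.config["TRAP_HTTP_EXCEPTIONS"] = True      # re-raise every HTTPException
app.config["TRAP_BAD_REQUEST_ERRORS"] = True   # include BadRequestKeyError

@app.route("/boom")
def boom():
    abort(404)  # with trapping enabled this propagates as a normal exception
# ---------------------------------------------------------------------------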
A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
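# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: opting out of the automatic
# OPTIONS reply checked in dispatch_request() above, so OPTIONS requests are
# dispatched to the view itself instead of make_default_options_response().
from flask import Flask, request

app = Flask(__name__)

def ping():
    if request.method == "OPTIONS":
        return "", 204  # handled manually instead of the automatic response
    return "pong"

app.add_url_rule(
    "/ping", view_func=ping, methods=["GET", "OPTIONS"],
    provide_automatic_options=False,
)
# ---------------------------------------------------------------------------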
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
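# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: a build-error fallback consulted
# by handle_url_build_error() above. Returning a string resolves the failed
# url_for() call; returning None falls through to the next handler or
# re-raises the BuildError.
from flask import Flask

app = Flask(__name__)

def fallback_url(error, endpoint, values):
    return "/missing"  # made-up fallback target

app.url_build_error_handlers.append(fallback_url)
# ---------------------------------------------------------------------------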
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1246, "name": "shell_context_processor", "kind": "def", "category": "function", "info": " def shell_context_processor(self, f: t.Callable) -> t.Callable:\n \"\"\"Registers a shell context processor function.\n\n .. 
versionadded:: 0.11\n \"\"\"\n self.shell_context_processors.append(f)\n return f\n\n def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPExcpetion`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. 
versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
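# ---------------------------------------------------------------------------
# Illustrative aside, not part of this file: feeding extra names into
# ``flask shell`` via shell_context_processor(), defined earlier in this
# file.
from flask import Flask

app = Flask(__name__)

@app.shell_context_processor
def make_shell_context():
    return {"app": app}  # merged into the interactive shell namespace
# ---------------------------------------------------------------------------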
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
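The return types enumerated in the ``make_response`` docstring above can be exercised directly from view functions; a small sketch, assuming a standard ``Flask`` app (route names and header values are illustrative only):

from flask import Flask

app = Flask(__name__)

@app.route("/text")
def text():
    return "plain body"  # str -> UTF-8 encoded body

@app.route("/json")
def as_json():
    return {"ok": True}  # dict -> passed through jsonify()

@app.route("/created")
def created():
    # (body, status, headers) three-tuple
    return "made", 201, {"X-Id": "42"}

@app.route("/teapot")
def teapot():
    return "short and stout", 418  # (body, status) two-tuple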
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
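As the ``process_response`` docstring above notes, ``after_request`` functions run in reverse order of registration; a sketch to illustrate (the ``X-Order`` header is made up for demonstration):

from flask import Flask

app = Flask(__name__)

@app.after_request
def first_registered(response):
    # Registered first, so it runs second.
    response.headers["X-Order"] = response.headers.get("X-Order", "") + "1"
    return response

@app.after_request
def second_registered(response):
    # Registered second, so it runs first.
    response.headers["X-Order"] = response.headers.get("X-Order", "") + "2"
    return response

@app.route("/")
def index():
    return "ok"

# A request to "/" finishes with X-Order == "21".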
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
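Both context helpers shown above can be driven manually outside a real request; a sketch assuming the usual ``app`` object (the query string is illustrative only):

from flask import Flask, current_app, request

app = Flask(__name__)

with app.app_context():
    # current_app now points at `app`, as described above.
    assert current_app.name == app.name

with app.test_request_context("/search?q=flask"):
    # `request` is bound to a synthetic GET request built by
    # Werkzeug's EnvironBuilder with the app's defaults.
    assert request.path == "/search"
    assert request.args["q"] == "flask"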
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1254, "name": "_find_error_handler", "kind": "def", "category": "function", "info": " def _find_error_handler(self, e: Exception) -> t.Optional[ErrorHandlerCallable]:\n \"\"\"Return a registered error handler for an exception in this order:\n blueprint handler for a specific code, app handler for a specific code,\n blueprint handler for an exception class, app handler for an exception\n class, or ``None`` if a suitable handler is not found.\n \"\"\"\n exc_class, code = self._get_exc_class_and_code(type(e))\n\n for c in [code, None]:\n for name in chain(self._request_blueprints(), [None]):\n handler_map = self.error_handler_spec[name][c]\n\n if not handler_map:\n continue\n\n for cls in exc_class.__mro__:\n handler = handler_map.get(cls)\n\n if handler is not None:\n return handler\n return None\n\n def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. 
versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1260, "name": "_get_exc_class_and_code", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1263, "name": "_request_blueprints", "kind": "ref", "category": "function", "info": " for name in chain(self._request_blueprints(), [None]):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1276, "name": "handle_http_exception", "kind": "def", "category": "function", "info": " def handle_http_exception(\n self, e: HTTPException\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"Handles an HTTP exception. By default this will invoke the\n registered error handlers and fall back to returning the\n exception as response.\n\n .. versionchanged:: 1.0.3\n ``RoutingException``, used internally for actions such as\n slash redirects during routing, is not passed to error\n handlers.\n\n .. versionchanged:: 1.0\n Exceptions are looked up by code *and* by MRO, so\n ``HTTPException`` subclasses can be handled with a catch-all\n handler for the base ``HTTPException``.\n\n .. versionadded:: 0.3\n \"\"\"\n # Proxy exceptions don't have error codes. We want to always return\n # those unchanged as errors\n if e.code is None:\n return e\n\n # RoutingExceptions are used internally to trigger routing\n # actions, such as slash redirects raising RequestRedirect. They\n # are not raised or handled in user code.\n if isinstance(e, RoutingException):\n return e\n\n handler = self._find_error_handler(e)\n if handler is None:\n return e\n return self.ensure_sync(handler)(e)\n\n def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. 
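Given the lookup order implemented by ``_find_error_handler`` above (specific status code before exception class, blueprint before app, walking the MRO), handlers can be registered like this; a sketch, with response bodies invented for illustration:

from flask import Flask
from werkzeug.exceptions import HTTPException, NotFound

app = Flask(__name__)

@app.errorhandler(404)
def not_found(e: NotFound):
    # Matched first for 404s: an app handler for a specific code.
    return {"error": "not found"}, 404

@app.errorhandler(HTTPException)
def any_http_error(e: HTTPException):
    # Catch-all reached via MRO for HTTPException subclasses that
    # have no more specific handler. e.code can be None for proxy
    # exceptions, so fall back to 500.
    return {"error": e.name}, e.code or 500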
A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. 
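Per the ``handle_exception`` docstring just above, a registered 500 handler always receives the ``InternalServerError`` wrapper, with the unhandled exception on ``original_exception``; a sketch (the response body is invented for illustration):

from flask import Flask
from werkzeug.exceptions import InternalServerError

app = Flask(__name__)

@app.errorhandler(500)
def on_server_error(e: InternalServerError):
    # The unhandled exception; may be None for an explicit abort(500).
    original = getattr(e, "original_exception", None)
    return {"error": "internal error", "cause": repr(original)}, 500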
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1306, "name": "_find_error_handler", "kind": "ref", "category": "function", "info": " handler = self._find_error_handler(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1309, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " return self.ensure_sync(handler)(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1311, "name": "trap_http_exception", "kind": "def", "category": "function", "info": " def trap_http_exception(self, e: Exception) -> bool:\n \"\"\"Checks if an HTTP exception should be trapped or not. By default\n this will return ``False`` for all exceptions except for a bad request\n key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It\n also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.\n\n This is called for all HTTP exceptions raised by a view function.\n If it returns ``True`` for any exception the error handler for this\n exception is not called and it shows up as regular exception in the\n traceback. 
This is helpful for debugging implicitly raised HTTP\n exceptions.\n\n .. versionchanged:: 1.0\n Bad request errors are not trapped by default in debug mode.\n\n .. versionadded:: 0.8\n \"\"\"\n if self.config[\"TRAP_HTTP_EXCEPTIONS\"]:\n return True\n\n trap_bad_request = self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n\n # if unset, trap key errors in debug mode\n if (\n trap_bad_request is None\n and self.debug\n and isinstance(e, BadRequestKeyError)\n ):\n return True\n\n if trap_bad_request:\n return isinstance(e, BadRequest)\n\n return False\n\n def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. 
This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recorded during routing are reraised with\n this method. In debug mode, redirect requests for non-``GET``,\n ``HEAD``, or ``OPTIONS`` requests are not reraised; a different\n error is raised instead to aid debugging.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling; this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this might be called as a result of a failure, a\n special safe mode is available which can be enabled with the\n `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1346, "name": "handle_user_exception", "kind": "def", "category": "function", "info": " def handle_user_exception(\n self, e: Exception\n ) -> t.Union[HTTPException, ResponseReturnValue]:\n \"\"\"This method is called whenever an exception occurs that\n should be handled. A special case is :class:`~werkzeug\n .exceptions.HTTPException` which is forwarded to the\n :meth:`handle_http_exception` method. This function will either\n return a response value or reraise the exception with the same\n traceback.\n\n .. versionchanged:: 1.0\n Key errors raised from request data like ``form`` show the\n bad key in debug mode rather than a generic bad request\n message.\n\n .. 
versionadded:: 0.7\n \"\"\"\n if isinstance(e, BadRequestKeyError) and (\n self.debug or self.config[\"TRAP_BAD_REQUEST_ERRORS\"]\n ):\n e.show_exception = True\n\n if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n return self.handle_http_exception(e)\n\n handler = self._find_error_handler(e)\n\n if handler is None:\n raise\n\n return self.ensure_sync(handler)(e)\n\n def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. 
In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. 
versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. 
The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. 
versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. 
Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. 
See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1368, "name": "trap_http_exception", "kind": "ref", "category": "function", "info": " if isinstance(e, HTTPException) and not self.trap_http_exception(e):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1369, "name": "handle_http_exception", "kind": "ref", "category": "function", "info": " return self.handle_http_exception(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1371, "name": "_find_error_handler", "kind": "ref", "category": "function", "info": " handler = self._find_error_handler(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1376, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " return self.ensure_sync(handler)(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1378, "name": "handle_exception", "kind": "def", "category": "function", "info": " def handle_exception(self, e: Exception) -> Response:\n \"\"\"Handle an exception that did not have an error handler\n associated with it, or that was raised from an error handler.\n This always causes a 500 ``InternalServerError``.\n\n Always sends the :data:`got_request_exception` signal.\n\n If :attr:`propagate_exceptions` is ``True``, such as in debug\n mode, the error will be re-raised so that the debugger can\n display it. Otherwise, the original exception is logged, and\n an :exc:`~werkzeug.exceptions.InternalServerError` is returned.\n\n If an error handler is registered for ``InternalServerError`` or\n ``500``, it will be used. For consistency, the handler will\n always receive the ``InternalServerError``. The original\n unhandled exception is available as ``e.original_exception``.\n\n .. versionchanged:: 1.1.0\n Always passes the ``InternalServerError`` instance to the\n handler, setting ``original_exception`` to the unhandled\n error.\n\n .. versionchanged:: 1.1.0\n ``after_request`` functions and other finalization is done\n even for the default 500 response when there is no handler.\n\n .. versionadded:: 0.3\n \"\"\"\n exc_info = sys.exc_info()\n got_request_exception.send(self, exception=e)\n\n if self.propagate_exceptions:\n # Re-raise if called with an active exception, otherwise\n # raise the passed in exception.\n if exc_info[1] is e:\n raise\n\n raise e\n\n self.log_exception(exc_info)\n server_error: t.Union[InternalServerError, ResponseReturnValue]\n server_error = InternalServerError(original_exception=e)\n handler = self._find_error_handler(server_error)\n\n if handler is not None:\n server_error = self.ensure_sync(handler)(server_error)\n\n return self.finalize_request(server_error, from_error_handler=True)\n\n def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. 
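The ``wsgi_app`` docstring captured in the entry above recommends wrapping ``app.wsgi_app`` rather than rebinding ``app`` itself, so the original application object and its methods stay reachable. A minimal sketch of that pattern, assuming a hypothetical ``SimpleHeaderMiddleware`` and an invented ``X-Example`` header::

    from flask import Flask

    app = Flask(__name__)

    class SimpleHeaderMiddleware:
        # Hypothetical middleware: appends one response header, then
        # delegates to the wrapped WSGI callable.
        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            def custom_start_response(status, headers, exc_info=None):
                headers.append(("X-Example", "1"))  # header name is assumed
                return start_response(status, headers, exc_info)

            return self.wsgi_app(environ, custom_start_response)

    # Wrap wsgi_app, not the app object, so `app` and its methods stay usable.
    app.wsgi_app = SimpleHeaderMiddleware(app.wsgi_app)

Because only the inner WSGI callable is replaced, ``app.route``, ``app.test_client`` and the rest of the API keep working on the same object.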
This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
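``try_trigger_before_first_request_functions`` above runs the registered ``before_first_request`` callbacks exactly once per application instance, double-checked under ``_before_request_lock``. A registration sketch; the warm-up body is invented::

    from flask import Flask

    app = Flask(__name__)

    @app.before_first_request
    def warm_up():
        # Runs once per process, just before the first request is dispatched.
        app.logger.info("warming up")  # illustrative side effect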
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
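The ``make_response`` rules quoted a little above accept a bare body, ``(body, status)``, ``(body, headers)``, or ``(body, status, headers)``, and pass ``dict`` bodies through ``jsonify``. A few equivalent view returns, for illustration only (routes and header name are invented)::

    from flask import Flask

    app = Flask(__name__)

    @app.route("/text")
    def text():
        return "hello"  # str body, default 200

    @app.route("/created")
    def created():
        return "made", 201  # (body, status)

    @app.route("/headered")
    def headered():
        return "ok", {"X-Example": "1"}  # (body, headers)

    @app.route("/json")
    def json_view():
        return {"ok": True}, 200  # dict is jsonify'd, then status is applied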
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
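``app_context`` above is the documented way to make ``current_app`` usable outside request handling, for example in scripts or background jobs; ``init_db`` in the docstring is only a placeholder. A concrete use, with an assumed config key::

    from flask import Flask, current_app

    app = Flask(__name__)
    app.config["EXAMPLE_SETTING"] = "x"  # assumed key, not a Flask default

    with app.app_context():
        # current_app resolves to `app` until the block exits.
        print(current_app.config["EXAMPLE_SETTING"])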
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
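``test_request_context`` as documented above builds the environ through Werkzeug's ``EnvironBuilder``; passing ``json=`` both serializes the body and defaults the content type. A usage sketch with an invented path and payload::

    from flask import Flask, request

    app = Flask(__name__)

    with app.test_request_context("/submit", method="POST", json={"x": 1}):
        # `request` points at the synthetic request; nothing was dispatched.
        assert request.path == "/submit"
        assert request.get_json() == {"x": 1}
        assert request.content_type == "application/json"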
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1407, "name": "send", "kind": "ref", "category": "function", "info": " got_request_exception.send(self, exception=e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1417, "name": "log_exception", "kind": "ref", "category": "function", "info": " self.log_exception(exc_info)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1420, "name": "_find_error_handler", "kind": "ref", "category": "function", "info": " handler = self._find_error_handler(server_error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1423, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " server_error = self.ensure_sync(handler)(server_error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1425, "name": "finalize_request", "kind": "ref", "category": "function", "info": " return self.finalize_request(server_error, from_error_handler=True)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1427, "name": "log_exception", "kind": "def", "category": "function", "info": " def log_exception(\n self,\n exc_info: t.Union[\n t.Tuple[type, BaseException, TracebackType], t.Tuple[None, None, None]\n ],\n ) -> None:\n \"\"\"Logs an exception. This is called by :meth:`handle_exception`\n if debugging is disabled and right before the handler is called.\n The default implementation logs the exception as error on the\n :attr:`logger`.\n\n .. versionadded:: 0.8\n \"\"\"\n self.logger.error(\n f\"Exception on {request.path} [{request.method}]\", exc_info=exc_info\n )\n\n def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. 
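Per the ``handle_exception`` docstring embedded in the entry that closes here, a handler registered for ``InternalServerError`` (or ``500``) always receives the ``InternalServerError`` instance, with the unhandled error attached as ``original_exception``. An illustrative handler; the route and messages are invented::

    from flask import Flask
    from werkzeug.exceptions import InternalServerError

    app = Flask(__name__)

    @app.errorhandler(InternalServerError)
    def handle_500(e):
        # `e` is always the InternalServerError; the original unhandled
        # exception, if there was one, is e.original_exception.
        original = getattr(e, "original_exception", None)
        return {"error": "internal error", "cause": repr(original)}, 500

    @app.route("/boom")
    def boom():
        raise RuntimeError("unhandled")  # triggers handle_500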
versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. 
versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. 
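``ensure_sync`` and ``async_to_sync`` above are what let ``async def`` views run on WSGI workers: coroutine functions are wrapped with asgiref's ``async_to_sync``, which requires installing Flask with the ``async`` extra. A minimal async view under that assumption::

    import asyncio

    from flask import Flask

    app = Flask(__name__)

    @app.route("/async")
    async def async_view():
        # Dispatch wraps this with app.ensure_sync(), so the coroutine is
        # driven to completion before the WSGI response is produced.
        await asyncio.sleep(0)
        return "done"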
The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. 
versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. 
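``inject_url_defaults`` above feeds values from registered ``url_defaults`` callbacks (app-level ones plus the endpoint's blueprint) into ``url_for``. An illustrative registration; the ``lang`` parameter is invented::

    from flask import Flask, url_for

    app = Flask(__name__)

    @app.url_defaults
    def add_lang(endpoint, values):
        # Called during URL building; may fill in missing values.
        values.setdefault("lang", "en")

    @app.route("/<lang>/about")
    def about(lang):
        return lang

    with app.test_request_context():
        assert url_for("about") == "/en/about"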
Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. 
See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1444, "name": "raise_routing_exception", "kind": "def", "category": "function", "info": " def raise_routing_exception(self, request: Request) -> \"te.NoReturn\":\n \"\"\"Exceptions that are recording during routing are reraised with\n this method. 
During debug we are not reraising redirect requests\n for non ``GET``, ``HEAD``, or ``OPTIONS`` requests and we're raising\n a different error instead to help debug situations.\n\n :internal:\n \"\"\"\n if (\n not self.debug\n or not isinstance(request.routing_exception, RequestRedirect)\n or request.method in (\"GET\", \"HEAD\", \"OPTIONS\")\n ):\n raise request.routing_exception # type: ignore\n\n from .debughelpers import FormDataRoutingRedirect\n\n raise FormDataRoutingRedirect(request)\n\n def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
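When no request is active, ``create_url_adapter`` falls back to binding the URL map from ``SERVER_NAME``, ``APPLICATION_ROOT``, and ``PREFERRED_URL_SCHEME``, which makes URL building work inside a bare application context. A sketch, with illustrative config values:

.. code-block:: python

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"
    app.config["PREFERRED_URL_SCHEME"] = "https"


    @app.route("/report/<int:report_id>")
    def report(report_id):
        return f"report {report_id}"


    with app.app_context():
        # No request is active, so create_url_adapter() binds the
        # map to SERVER_NAME and absolute URLs can still be built.
        print(url_for("report", report_id=7, _external=True))
        # -> https://example.com/report/7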
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
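These phases correspond to the decorator-registered hooks: a non-``None`` return from a ``before_request`` handler short-circuits dispatch, ``after_request`` handlers post-process the response, and ``teardown_request`` handlers always run. A sketch with illustrative handler bodies:

.. code-block:: python

    from flask import Flask, request

    app = Flask(__name__)


    @app.before_request
    def require_token():
        # A non-None return value is treated as the response and
        # the view is never called (see preprocess_request above).
        if request.headers.get("X-Token") != "secret":
            return "forbidden", 403


    @app.after_request
    def add_header(response):
        # Called by process_response(), in reverse registration
        # order; must return a response object.
        response.headers["X-Served-By"] = "flask"
        return response


    @app.teardown_request
    def log_teardown(exc):
        # Runs even when the view raised; exc is the unhandled
        # exception or None.
        if exc is not None:
            app.logger.warning("request failed: %r", exc)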
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
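``do_teardown_appcontext`` is the hook point for releasing per-context resources. A common sketch caching a connection on ``g`` (the ``get_db`` helper and database path are illustrative):

.. code-block:: python

    import sqlite3

    from flask import Flask, g

    app = Flask(__name__)


    def get_db():
        # Illustrative helper: lazily open one connection per
        # application context.
        if "db" not in g:
            g.db = sqlite3.connect("app.db")
        return g.db


    @app.teardown_appcontext
    def close_db(exc):
        # Runs right before the application context is popped,
        # whether or not an exception occurred.
        db = g.pop("db", None)
        if db is not None:
            db.close()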
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1461, "name": "FormDataRoutingRedirect", "kind": "ref", "category": "function", "info": " raise FormDataRoutingRedirect(request)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1463, "name": "dispatch_request", "kind": "def", "category": "function", "info": " def dispatch_request(self) -> ResponseReturnValue:\n \"\"\"Does the request dispatching. Matches the URL and returns the\n return value of the view or error handler. This does not have to\n be a response object. In order to convert the return value to a\n proper response object, call :func:`make_response`.\n\n .. 
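The wrapping pattern recommended by the ``wsgi_app`` docstring keeps the app object intact. A sketch of a trivial WSGI middleware (the header name is illustrative):

.. code-block:: python

    from flask import Flask


    class HeaderMiddleware:
        """Add a header to every response by wrapping the WSGI callable."""

        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            def custom_start_response(status, headers, exc_info=None):
                headers.append(("X-Middleware", "1"))
                return start_response(status, headers, exc_info)

            return self.wsgi_app(environ, custom_start_response)


    app = Flask(__name__)
    # Wrap wsgi_app rather than the app itself, so methods on the
    # original application object remain available.
    app.wsgi_app = HeaderMiddleware(app.wsgi_app)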
versionchanged:: 0.7\n This no longer does the exception handling, this code was\n moved to the new :meth:`full_dispatch_request`.\n \"\"\"\n req = _request_ctx_stack.top.request\n if req.routing_exception is not None:\n self.raise_routing_exception(req)\n rule = req.url_rule\n # if we provide automatic options for this URL and the\n # request came with the OPTIONS method, reply automatically\n if (\n getattr(rule, \"provide_automatic_options\", False)\n and req.method == \"OPTIONS\"\n ):\n return self.make_default_options_response()\n # otherwise dispatch to the handler for that endpoint\n return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n\n def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. 
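The automatic ``OPTIONS`` branch in ``dispatch_request`` can be observed with the test client; a sketch (the route is illustrative, and the exact method list depends on the URL map):

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)


    @app.route("/item", methods=["GET", "POST"])
    def item():
        return "item"


    with app.test_client() as client:
        rv = client.options("/item")
        # Flask answered without calling the view: the Allow header
        # was filled in by make_default_options_response().
        print(sorted(rv.allow))  # e.g. ['GET', 'HEAD', 'OPTIONS', 'POST']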
versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. 
The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. 
versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. 
Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. 
See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
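A sketch of using ``test_request_context`` to run request-dependent code without dispatching a full request (path and JSON payload are illustrative):

.. code-block:: python

    from flask import Flask, request

    app = Flask(__name__)

    with app.test_request_context("/report", method="POST", json={"id": 7}):
        # request now points at the synthetic request built by
        # EnvironBuilder; json= also set the Content-Type.
        assert request.path == "/report"
        assert request.get_json() == {"id": 7}
        assert request.content_type == "application/json"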
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1475, "name": "raise_routing_exception", "kind": "ref", "category": "function", "info": " self.raise_routing_exception(req)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1483, "name": "make_default_options_response", "kind": "ref", "category": "function", "info": " return self.make_default_options_response()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1485, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " return self.ensure_sync(self.view_functions[rule.endpoint])(**req.view_args)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1487, "name": "full_dispatch_request", "kind": "def", "category": "function", "info": " def full_dispatch_request(self) -> Response:\n \"\"\"Dispatches the request and on top of that performs request\n pre and postprocessing as well as HTTP exception catching and\n error handling.\n\n .. versionadded:: 0.7\n \"\"\"\n self.try_trigger_before_first_request_functions()\n try:\n request_started.send(self)\n rv = self.preprocess_request()\n if rv is None:\n rv = self.dispatch_request()\n except Exception as e:\n rv = self.handle_user_exception(e)\n return self.finalize_request(rv)\n\n def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. 
versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. 
versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. 
This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. 
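``handle_url_build_error`` consults ``url_build_error_handlers`` before re-raising. A sketch of registering a fallback handler (the endpoint name and redirect target are illustrative):

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)


    def external_fallback(error, endpoint, values):
        # Returning a string makes url_for() use it instead of
        # raising; returning None lets the next handler run, or the
        # BuildError propagate.
        if endpoint == "legacy":
            return "https://legacy.example.com/"
        return None


    app.url_build_error_handlers.append(external_fallback)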
versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. 
Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. 
Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1494, "name": "try_trigger_before_first_request_functions", "kind": "ref", "category": "function", "info": " self.try_trigger_before_first_request_functions()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1496, "name": "send", "kind": "ref", "category": "function", "info": " request_started.send(self)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1497, "name": "preprocess_request", "kind": "ref", "category": "function", "info": " rv = self.preprocess_request()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1499, "name": "dispatch_request", "kind": "ref", "category": "function", "info": " rv = self.dispatch_request()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1501, "name": "handle_user_exception", "kind": "ref", "category": "function", "info": " rv = self.handle_user_exception(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1502, "name": "finalize_request", "kind": "ref", "category": "function", "info": " return self.finalize_request(rv)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1504, "name": "finalize_request", "kind": "def", "category": "function", "info": " def finalize_request(\n self,\n rv: t.Union[ResponseReturnValue, HTTPException],\n from_error_handler: bool = False,\n ) -> Response:\n \"\"\"Given the return value from a view function this finalizes\n the request by converting it into a response and invoking the\n postprocessing functions. This is invoked for both normal\n request dispatching as well as error handlers.\n\n Because this means that it might be called as a result of a\n failure a special safe mode is available which can be enabled\n with the `from_error_handler` flag. 
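``_request_blueprints`` splits the dotted blueprint name of the matched endpoint and yields the segments innermost-first, which is how nested blueprint hooks are resolved. A sketch, assuming Flask 2.0's nested blueprint support (blueprint names are illustrative):

.. code-block:: python

    from flask import Blueprint, Flask

    app = Flask(__name__)
    parent = Blueprint("parent", __name__, url_prefix="/parent")
    child = Blueprint("child", __name__, url_prefix="/child")


    @child.route("/hello")
    def hello():
        return "hello"


    parent.register_blueprint(child)
    app.register_blueprint(parent)

    # The endpoint is dotted ("parent.child.hello");
    # _request_blueprints() splits it and yields the names
    # innermost-first: "child", then "parent".
    with app.test_client() as c:
        assert c.get("/parent/child/hello").data == b"hello"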
If enabled, failures in\n response processing will be logged and otherwise ignored.\n\n :internal:\n \"\"\"\n response = self.make_response(rv)\n try:\n response = self.process_response(response)\n request_finished.send(self, response=response)\n except Exception:\n if not from_error_handler:\n raise\n self.logger.exception(\n \"Request finalizing failed with an error while handling an error\"\n )\n return response\n\n def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
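``make_response`` accepts the several view return shapes enumerated in the docstring above: plain ``str``/``bytes`` bodies, ``dict`` (passed through ``jsonify``), and 2- or 3-tuples. A short sketch of each accepted shape:

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    @app.route("/text")
    def text():
        return "hello"                      # str body, status 200

    @app.route("/json")
    def as_json():
        return {"ok": True}                 # dict -> jsonify()

    @app.route("/created")
    def created():
        return "made", 201                  # (body, status)

    @app.route("/headers")
    def with_headers():
        return "x", 200, {"X-Demo": "1"}    # (body, status, headers)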
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
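``create_url_adapter`` can bind the URL map without a request when ``SERVER_NAME`` is configured, which is what lets ``url_for(..., _external=True)`` work inside a bare application context. A sketch under that assumption:

.. code-block:: python

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"
    app.config["PREFERRED_URL_SCHEME"] = "https"

    @app.route("/")
    def index():
        return "home"

    with app.app_context():
        # No request is active; the adapter is bound from SERVER_NAME,
        # APPLICATION_ROOT and PREFERRED_URL_SCHEME instead.
        print(url_for("index", _external=True))  # https://example.com/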
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
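``preprocess_request`` stops dispatch as soon as a ``before_request`` hook returns a non-``None`` value, and ``process_response`` then runs the ``after_request`` chain on whatever response results. A sketch (the token check is hypothetical):

.. code-block:: python

    from flask import Flask, request

    app = Flask(__name__)

    @app.before_request
    def require_token():
        # A non-None return short-circuits dispatch: the view never
        # runs and this value is finalized as the response.
        if request.headers.get("X-Token") != "secret":
            return "forbidden", 403

    @app.after_request
    def stamp(response):
        # after_request hooks run in reverse registration order,
        # even for the short-circuited 403 above.
        response.headers["X-App"] = "demo"
        return response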
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
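``do_teardown_appcontext`` runs right before the application context pops, making it the conventional place to release per-context resources. A sketch of the common pattern of closing a connection stored on ``g``:

.. code-block:: python

    import sqlite3
    from flask import Flask, g

    app = Flask(__name__)

    def get_db():
        if "db" not in g:
            g.db = sqlite3.connect("app.db")
        return g.db

    @app.teardown_appcontext
    def close_db(exc):
        # exc is the unhandled exception or None; teardown runs either way.
        db = g.pop("db", None)
        if db is not None:
            db.close()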
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
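``test_request_context`` builds the environment through Werkzeug's ``EnvironBuilder``; passing ``json=`` serializes the body and defaults ``content_type`` to ``application/json``, as the docstring above notes. A sketch:

.. code-block:: python

    from flask import Flask, request

    app = Flask(__name__)

    with app.test_request_context("/search", json={"q": "flask"}):
        # request points at the synthetic request; no dispatch happened.
        assert request.path == "/search"
        assert request.get_json()["q"] == "flask"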
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1521, "name": "make_response", "kind": "ref", "category": "function", "info": " response = self.make_response(rv)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1523, "name": "process_response", "kind": "ref", "category": "function", "info": " response = self.process_response(response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1524, "name": "send", "kind": "ref", "category": "function", "info": " request_finished.send(self, response=response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1533, "name": "try_trigger_before_first_request_functions", "kind": "def", "category": "function", "info": " def try_trigger_before_first_request_functions(self) -> None:\n \"\"\"Called before each request and will ensure that it triggers\n the :attr:`before_first_request_funcs` and only exactly once per\n application instance (which means process usually).\n\n :internal:\n \"\"\"\n if self._got_first_request:\n return\n with self._before_request_lock:\n if self._got_first_request:\n return\n for func in self.before_first_request_funcs:\n self.ensure_sync(func)()\n self._got_first_request = True\n\n def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. 
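``ensure_sync`` leaves plain functions untouched and routes coroutine functions through ``async_to_sync``, so ``async def`` views can run under a WSGI worker. A sketch, assuming Flask was installed with the ``async`` extra (``pip install flask[async]``):

.. code-block:: python

    import asyncio
    from flask import Flask

    app = Flask(__name__)

    @app.route("/slow")
    async def slow():
        # Wrapped by ensure_sync() -> async_to_sync() at dispatch time,
        # so the WSGI worker blocks until the coroutine completes.
        await asyncio.sleep(0.1)
        return "done"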
versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. 
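Both ``ensure_sync`` and ``async_to_sync`` are documented override points. A hypothetical subclass sketch that logs each coroutine view it wraps before delegating to the stock behavior:

.. code-block:: python

    import typing as t
    from inspect import iscoroutinefunction
    from flask import Flask

    class LoggingFlask(Flask):
        def ensure_sync(self, func: t.Callable) -> t.Callable:
            # Purely illustrative: observe which views are async,
            # then defer to the default wrapping logic.
            if iscoroutinefunction(func):
                self.logger.info("wrapping async view %s", func.__name__)
            return super().ensure_sync(func)

    app = LoggingFlask(__name__)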
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
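The ``force_type`` branch above is why a view may return any WSGI callable: it is invoked against ``request.environ`` and coerced to ``response_class``. Werkzeug's ``HTTPException`` instances are themselves WSGI applications, so returning one (rather than raising it) should also work. A sketch:

.. code-block:: python

    from flask import Flask
    from werkzeug.exceptions import NotFound

    app = Flask(__name__)

    @app.route("/maybe")
    def maybe():
        # NotFound() is a WSGI callable; make_response() coerces it
        # through response_class.force_type() into a regular 404.
        return NotFound()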
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
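``preprocess_request`` first runs the URL value preprocessors, which may mutate ``request.view_args`` before any ``before_request`` hook or the view sees them. The classic sketch of pulling a language code out of every matching URL (names are illustrative):

.. code-block:: python

    from flask import Flask, g

    app = Flask(__name__)

    @app.url_value_preprocessor
    def pull_lang(endpoint, values):
        # Runs before the before_request hooks; values is
        # request.view_args, so popping hides it from the view.
        if values is not None:
            g.lang_code = values.pop("lang_code", "en")

    @app.route("/<lang_code>/about")
    def about():
        return f"about page in {g.lang_code}"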
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
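``do_teardown_request`` passes each teardown function the unhandled exception (or ``None``), detected from ``sys.exc_info()`` when not supplied explicitly. A sketch of a hook that distinguishes the two cases:

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    @app.teardown_request
    def log_teardown(exc):
        # exc is None on a clean request, or the unhandled exception;
        # should_ignore_error() returning True would also yield None.
        if exc is not None:
            app.logger.warning("request failed: %r", exc)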
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1546, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " self.ensure_sync(func)()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1549, "name": "make_default_options_response", "kind": "def", "category": "function", "info": " def make_default_options_response(self) -> Response:\n \"\"\"This method is called to create the default ``OPTIONS`` response.\n This can be changed through subclassing to change the default\n behavior of ``OPTIONS`` responses.\n\n .. versionadded:: 0.7\n \"\"\"\n adapter = _request_ctx_stack.top.url_adapter\n methods = adapter.allowed_methods()\n rv = self.response_class()\n rv.allow.update(methods)\n return rv\n\n def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. 
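As the ``wsgi_app`` docstring advises, middleware should wrap ``app.wsgi_app`` rather than rebind ``app`` itself, so the application object stays available for configuration. A sketch with a hypothetical timing middleware:

.. code-block:: python

    import time
    from flask import Flask

    app = Flask(__name__)

    class TimingMiddleware:
        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app

        def __call__(self, environ, start_response):
            start = time.perf_counter()
            try:
                return self.wsgi_app(environ, start_response)
            finally:
                print(f"request took {time.perf_counter() - start:.4f}s")

    # The app object keeps its methods; only the WSGI entry point is wrapped.
    app.wsgi_app = TimingMiddleware(app.wsgi_app)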
versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. 
The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. 
versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. 
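``handle_url_build_error`` gives each entry in ``url_build_error_handlers`` a chance to return a replacement URL before the ``BuildError`` propagates out of ``url_for``. A sketch registering a hypothetical fallback:

.. code-block:: python

    from flask import Flask

    app = Flask(__name__)

    def external_url_fallback(error, endpoint, values):
        # Return a string to use as the built URL, or None to let the
        # next handler (and ultimately the BuildError) take over.
        if endpoint == "legacy_docs":
            return "https://example.com/docs"
        return None

    app.url_build_error_handlers.append(external_url_fallback)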
Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. 
See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1558, "name": "response_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1562, "name": "should_ignore_error", "kind": "def", "category": "function", "info": " def should_ignore_error(self, error: t.Optional[BaseException]) -> bool:\n \"\"\"This is called to figure out if an error should be ignored\n or not as far as the teardown system is concerned. If this\n function returns ``True`` then the teardown handlers will not be\n passed the error.\n\n .. versionadded:: 0.10\n \"\"\"\n return False\n\n def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. 
If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the exiting value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. 
Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. 
versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. 
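# --- Editor's note (illustrative only): process_response(), shown above, runs
# after_request handlers in reverse registration order. Handler and header
# names here are made up.
from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    return "ok"

@app.after_request
def registered_first(resp):
    resp.headers.setdefault("X-Order", "first")  # runs second: no-op here
    return resp

@app.after_request
def registered_second(resp):
    resp.headers.setdefault("X-Order", "second")  # runs first, so it wins
    return resp

assert app.test_client().get("/").headers["X-Order"] == "second"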
Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. 
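# --- Editor's note: a sketch of the wrapping pattern the wsgi_app docstring
# above recommends; LoggingMiddleware is a hypothetical example.
from flask import Flask

class LoggingMiddleware:
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        # Runs around every request without hiding the Flask app object.
        print("handling", environ.get("PATH_INFO"))
        return self.wsgi_app(environ, start_response)

app = Flask(__name__)
# Rebinding app.wsgi_app (instead of `app = LoggingMiddleware(app)`) keeps
# app.route, app.test_client, etc. reachable on the original object.
app.wsgi_app = LoggingMiddleware(app.wsgi_app)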
Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1572, "name": "ensure_sync", "kind": "def", "category": "function", "info": " def ensure_sync(self, func: t.Callable) -> t.Callable:\n \"\"\"Ensure that the function is synchronous for WSGI workers.\n Plain ``def`` functions are returned as-is. ``async def``\n functions are wrapped to run and wait for the response.\n\n Override this method to change how the app runs async views.\n\n .. versionadded:: 2.0\n \"\"\"\n if iscoroutinefunction(func):\n return self.async_to_sync(func)\n\n return func\n\n def async_to_sync(\n self, func: t.Callable[..., t.Coroutine]\n ) -> t.Callable[..., t.Any]:\n \"\"\"Return a sync function that will run the coroutine function.\n\n .. code-block:: python\n\n result = app.async_to_sync(func)(*args, **kwargs)\n\n Override this method to change how the app converts async code\n to be synchronously callable.\n\n .. versionadded:: 2.0\n \"\"\"\n try:\n from asgiref.sync import async_to_sync as asgiref_async_to_sync\n except ImportError:\n raise RuntimeError(\n \"Install Flask with the 'async' extra in order to use async views.\"\n )\n\n # Check that Werkzeug isn't using its fallback ContextVar class.\n if ContextVar.__module__ == \"werkzeug.local\":\n raise RuntimeError(\n \"Async cannot be used with this combination of Python \"\n \"and Greenlet versions.\"\n )\n\n return asgiref_async_to_sync(func)\n\n def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
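# --- Editor's note (assumes Flask 2.0 installed as `flask[async]`): a sketch
# of the ensure_sync()/async_to_sync() path described above. Dispatch wraps
# the coroutine function so a plain WSGI worker can call it synchronously.
from flask import Flask

app = Flask(__name__)

@app.route("/async")
async def async_view():
    # app.ensure_sync(async_view) detects the coroutine function and wraps
    # it with asgiref's async_to_sync before calling it.
    return {"async": True}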
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
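# --- Editor's note: illustrative use of the test_request_context() parameters
# documented above; the path and payload are made up.
from flask import Flask, request

app = Flask(__name__)

with app.test_request_context("/reports", json={"user": "alice"}):
    # json= is serialized into the request body and content_type defaults
    # to application/json, so get_json() round-trips the payload.
    assert request.get_json() == {"user": "alice"}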
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1614, "name": "asgiref_async_to_sync", "kind": "ref", "category": "function", "info": " return asgiref_async_to_sync(func)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1616, "name": "make_response", "kind": "def", "category": "function", "info": " def make_response(self, rv: ResponseReturnValue) -> Response:\n \"\"\"Convert the return value from a view function to an instance of\n :attr:`response_class`.\n\n :param rv: the return value from the view function. The view function\n must return a response. Returning ``None``, or the view ending\n without returning, is not allowed. 
The following types are allowed\n for ``view_rv``:\n\n ``str``\n A response object is created with the string encoded to UTF-8\n as the body.\n\n ``bytes``\n A response object is created with the bytes as the body.\n\n ``dict``\n A dictionary that will be jsonify'd before being returned.\n\n ``tuple``\n Either ``(body, status, headers)``, ``(body, status)``, or\n ``(body, headers)``, where ``body`` is any of the other types\n allowed here, ``status`` is a string or an integer, and\n ``headers`` is a dictionary or a list of ``(key, value)``\n tuples. If ``body`` is a :attr:`response_class` instance,\n ``status`` overwrites the existing value and ``headers`` are\n extended.\n\n :attr:`response_class`\n The object is returned unchanged.\n\n other :class:`~werkzeug.wrappers.Response` class\n The object is coerced to :attr:`response_class`.\n\n :func:`callable`\n The function is called as a WSGI application. The result is\n used to create a response object.\n\n .. versionchanged:: 0.9\n Previously a tuple was interpreted as the arguments for the\n response object.\n \"\"\"\n\n status = headers = None\n\n # unpack tuple returns\n if isinstance(rv, tuple):\n len_rv = len(rv)\n\n # a 3-tuple is unpacked directly\n if len_rv == 3:\n rv, status, headers = rv\n # decide if a 2-tuple has status or headers\n elif len_rv == 2:\n if isinstance(rv[1], (Headers, dict, tuple, list)):\n rv, headers = rv\n else:\n rv, status = rv\n # other sized tuples are not allowed\n else:\n raise TypeError(\n \"The view function did not return a valid response tuple.\"\n \" The tuple must have the form (body, status, headers),\"\n \" (body, status), or (body, headers).\"\n )\n\n # the body must not be None\n if rv is None:\n raise TypeError(\n f\"The view function for {request.endpoint!r} did not\"\n \" return a valid response. The function either returned\"\n \" None or ended without a return statement.\"\n )\n\n # make sure the body is an instance of the response class\n if not isinstance(rv, self.response_class):\n if isinstance(rv, (str, bytes, bytearray)):\n # let the response class set the status and headers instead of\n # waiting to do it manually, so that the class can handle any\n # special logic\n rv = self.response_class(rv, status=status, headers=headers)\n status = headers = None\n elif isinstance(rv, dict):\n rv = jsonify(rv)\n elif isinstance(rv, BaseResponse) or callable(rv):\n # evaluate a WSGI callable, or coerce a different response\n # class to the correct type\n try:\n rv = self.response_class.force_type(rv, request.environ) # type: ignore # noqa: B950\n except TypeError as e:\n raise TypeError(\n f\"{e}\\nThe view function did not return a valid\"\n \" response. The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n ).with_traceback(sys.exc_info()[2])\n else:\n raise TypeError(\n \"The view function did not return a valid\"\n \" response. 
The return type must be a string,\"\n \" dict, tuple, Response instance, or WSGI\"\n f\" callable, but it was a {type(rv).__name__}.\"\n )\n\n rv = t.cast(Response, rv)\n # prefer the status if it was provided\n if status is not None:\n if isinstance(status, (str, bytes, bytearray)):\n rv.status = status # type: ignore\n else:\n rv.status_code = status\n\n # extend existing headers with provided headers\n if headers:\n rv.headers.update(headers)\n\n return rv\n\n def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1696, "name": "response_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1699, "name": "jsonify", "kind": "ref", "category": "function", "info": " rv = jsonify(rv)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1734, "name": "create_url_adapter", "kind": "def", "category": "function", "info": " def create_url_adapter(\n self, request: t.Optional[Request]\n ) -> t.Optional[MapAdapter]:\n \"\"\"Creates a URL adapter for the given request. The URL adapter\n is created at a point where the request context is not yet set\n up so the request is passed explicitly.\n\n .. versionadded:: 0.6\n\n .. versionchanged:: 0.9\n This can now also be called without a request object when the\n URL adapter is created for the application context.\n\n .. versionchanged:: 1.0\n :data:`SERVER_NAME` no longer implicitly enables subdomain\n matching. 
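# --- Editor's note: a sketch of the create_url_adapter() fallback described
# above. Without a request, the adapter is bound from SERVER_NAME (plus
# APPLICATION_ROOT and PREFERRED_URL_SCHEME); the names below are made up.
from flask import Flask, url_for

app = Flask(__name__)
app.config["SERVER_NAME"] = "example.com"

@app.route("/items/<int:item_id>")
def get_item(item_id):
    return str(item_id)

with app.app_context():
    # URL building works outside a request because the config supplies
    # the server name and scheme.
    assert url_for("get_item", item_id=7, _external=True) == "http://example.com/items/7"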
Use :attr:`subdomain_matching` instead.\n \"\"\"\n if request is not None:\n # If subdomain matching is disabled (the default), use the\n # default subdomain in all cases. This should be the default\n # in Werkzeug but it currently does not have that feature.\n if not self.subdomain_matching:\n subdomain = self.url_map.default_subdomain or None\n else:\n subdomain = None\n\n return self.url_map.bind_to_environ(\n request.environ,\n server_name=self.config[\"SERVER_NAME\"],\n subdomain=subdomain,\n )\n # We need at the very least the server name to be set for this\n # to work.\n if self.config[\"SERVER_NAME\"] is not None:\n return self.url_map.bind(\n self.config[\"SERVER_NAME\"],\n script_name=self.config[\"APPLICATION_ROOT\"],\n url_scheme=self.config[\"PREFERRED_URL_SCHEME\"],\n )\n\n return None\n\n def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. 
versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. 
Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. 
Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1776, "name": "inject_url_defaults", "kind": "def", "category": "function", "info": " def inject_url_defaults(self, endpoint: str, values: dict) -> None:\n \"\"\"Injects the URL defaults for the given endpoint directly into\n the values dictionary passed. This is used internally and\n automatically called on URL building.\n\n .. versionadded:: 0.7\n \"\"\"\n funcs: t.Iterable[URLDefaultCallable] = self.url_default_functions[None]\n if \".\" in endpoint:\n bp = endpoint.rsplit(\".\", 1)[0]\n funcs = chain(funcs, self.url_default_functions[bp])\n for func in funcs:\n func(endpoint, values)\n\n def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
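# --- Editor's note (hypothetical handler): handle_url_build_error(), shown
# above, walks app.url_build_error_handlers and returns the first non-None
# result instead of raising the BuildError.
from flask import Flask

app = Flask(__name__)

def legacy_fallback(error, endpoint, values):
    # Return a URL string to swallow the BuildError, or None to let
    # handle_url_build_error() re-raise it.
    if endpoint == "legacy_page":
        return "/legacy"
    return None

app.url_build_error_handlers.append(legacy_fallback)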
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
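The first loop in ``preprocess_request`` pairs naturally with ``inject_url_defaults``: one pulls a value out of the matched URL before the view runs, the other puts it back during URL building. A sketch under the assumption of a hypothetical ``lang`` URL parameter::

    from flask import Flask, g

    app = Flask(__name__)

    @app.url_value_preprocessor
    def pull_lang(endpoint, values):
        # Runs from preprocess_request, before any before_request
        # function; views then don't need a `lang` argument.
        g.lang = (values or {}).pop("lang", "en")

    @app.url_defaults
    def push_lang(endpoint, values):
        # Runs from inject_url_defaults during url_for(), so callers
        # can omit `lang` when building URLs.
        values.setdefault("lang", "en")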
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
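A typical use of the hook that ``do_teardown_appcontext`` drives is releasing a per-context resource; a minimal sketch assuming a hypothetical ``g.db`` handle::

    from flask import Flask, g

    app = Flask(__name__)

    @app.teardown_appcontext
    def close_db(exc):
        # Runs right before the app context pops; `exc` is the
        # unhandled exception, or None on a clean teardown.
        db = g.pop("db", None)
        if db is not None:
            db.close()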
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1788, "name": "func", "kind": "ref", "category": "function", "info": " func(endpoint, values)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1790, "name": "handle_url_build_error", "kind": "def", "category": "function", "info": " def handle_url_build_error(\n self, error: Exception, endpoint: str, values: dict\n ) -> str:\n \"\"\"Handle :class:`~werkzeug.routing.BuildError` on\n :meth:`url_for`.\n \"\"\"\n for handler in self.url_build_error_handlers:\n try:\n rv = handler(error, endpoint, values)\n except BuildError as e:\n # make error available outside except block\n error = e\n else:\n if rv is not None:\n return rv\n\n # Re-raise if called with an active exception, otherwise raise\n # the passed in exception.\n if error is sys.exc_info()[1]:\n raise\n\n raise error\n\n def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
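The ``app.wsgi_app = MyMiddleware(app.wsgi_app)`` recommendation above, in concrete form; ``RequestCounter`` is a hypothetical middleware, not part of Flask::

    from flask import Flask

    app = Flask(__name__)

    class RequestCounter:
        def __init__(self, wsgi_app):
            self.wsgi_app = wsgi_app
            self.count = 0

        def __call__(self, environ, start_response):
            # Runs around every request; delegates to the wrapped app.
            self.count += 1
            return self.wsgi_app(environ, start_response)

    # The Flask object itself stays intact, so its methods remain usable.
    app.wsgi_app = RequestCounter(app.wsgi_app)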
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
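The short-circuit behaviour of ``preprocess_request`` in a minimal sketch; the ``MAINTENANCE`` config key is hypothetical::

    from flask import Flask

    app = Flask(__name__)

    @app.before_request
    def check_maintenance():
        # A non-None return value stops dispatch and is treated as
        # the view's return value, as described above.
        if app.config.get("MAINTENANCE"):
            return "Down for maintenance", 503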
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
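Beyond the ``init_db()`` example in the ``app_context`` docstring, a runnable sketch of what pushing an application context actually changes::

    from flask import Flask, current_app

    app = Flask(__name__)

    with app.app_context():
        # current_app now resolves to `app`, with no request involved.
        assert current_app.name == app.name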
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1798, "name": "handler", "kind": "ref", "category": "function", "info": " rv = handler(error, endpoint, values)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1813, "name": "preprocess_request", "kind": "def", "category": "function", "info": " def preprocess_request(self) -> t.Optional[ResponseReturnValue]:\n \"\"\"Called before the request is dispatched. Calls\n :attr:`url_value_preprocessors` registered with the app and the\n current blueprint (if any). 
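The ``try``/``finally`` around ``builder.close()`` in ``test_request_context`` mirrors plain Werkzeug usage; a sketch with Werkzeug's builder directly, without the Flask-specific defaults that ``flask.testing.EnvironBuilder`` adds::

    from werkzeug.test import EnvironBuilder

    builder = EnvironBuilder(path="/report", method="POST", json={"x": 1})
    try:
        environ = builder.get_environ()  # a plain WSGI environ dict
    finally:
        builder.close()  # release any files the builder opened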
Then calls :attr:`before_request_funcs`\n registered with the app and the blueprint.\n\n If any :meth:`before_request` handler returns a non-None value, the\n value is handled as if it was the return value from the view, and\n further request handling is stopped.\n \"\"\"\n\n funcs: t.Iterable[URLValuePreprocessorCallable] = self.url_value_preprocessors[\n None\n ]\n for bp in self._request_blueprints():\n if bp in self.url_value_preprocessors:\n funcs = chain(funcs, self.url_value_preprocessors[bp])\n for func in funcs:\n func(request.endpoint, request.view_args)\n\n funcs: t.Iterable[BeforeRequestCallable] = self.before_request_funcs[None]\n for bp in self._request_blueprints():\n if bp in self.before_request_funcs:\n funcs = chain(funcs, self.before_request_funcs[bp])\n for func in funcs:\n rv = self.ensure_sync(func)()\n if rv is not None:\n return rv\n\n return None\n\n def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. 
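The reverse-registration order that ``process_response`` applies, made visible; the header names are arbitrary::

    from flask import Flask

    app = Flask(__name__)

    @app.after_request
    def add_first(response):
        response.headers["X-First"] = "1"
        return response

    @app.after_request
    def add_second(response):
        # Registered later, so process_response runs it earlier.
        response.headers["X-Second"] = "1"
        return response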
See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. 
Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
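What "the WSGI server calls the Flask application object" looks like with the standard library's reference server; a sketch, with ``serve_forever`` left commented so it terminates::

    from wsgiref.simple_server import make_server
    from flask import Flask

    app = Flask(__name__)

    @app.route("/")
    def index():
        return "hello"

    # The server invokes app(environ, start_response); Flask.__call__
    # simply forwards to wsgi_app.
    server = make_server("127.0.0.1", 8000, app)
    # server.serve_forever()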
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1827, "name": "_request_blueprints", "kind": "ref", "category": "function", "info": " for bp in self._request_blueprints():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1831, "name": "func", "kind": "ref", "category": "function", "info": " func(request.endpoint, request.view_args)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1834, "name": "_request_blueprints", "kind": "ref", "category": "function", "info": " for bp in self._request_blueprints():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1838, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " rv = self.ensure_sync(func)()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1844, "name": "process_response", "kind": "def", "category": "function", "info": " def process_response(self, response: Response) -> Response:\n \"\"\"Can be overridden in order to modify the response object\n before it's sent to the WSGI server. By default this will\n call all the :meth:`after_request` decorated functions.\n\n .. versionchanged:: 0.5\n As of Flask 0.5 the functions registered for after request\n execution are called in reverse order of registration.\n\n :param response: a :attr:`response_class` object.\n :return: a new response object or the same, has to be an\n instance of :attr:`response_class`.\n \"\"\"\n ctx = _request_ctx_stack.top\n funcs: t.Iterable[AfterRequestCallable] = ctx._after_request_functions\n for bp in self._request_blueprints():\n if bp in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[bp]))\n if None in self.after_request_funcs:\n funcs = chain(funcs, reversed(self.after_request_funcs[None]))\n for handler in funcs:\n response = self.ensure_sync(handler)(response)\n if not self.session_interface.is_null_session(ctx.session):\n self.session_interface.save_session(self, ctx.session, response)\n return response\n\n def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. 
versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. 
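A sketch of a teardown function receiving the ``exc`` argument that ``do_teardown_request`` passes along::

    from flask import Flask

    app = Flask(__name__)

    @app.teardown_request
    def log_failure(exc):
        # `exc` is the unhandled exception, or None; when the sentinel
        # default is used it is detected from sys.exc_info().
        if exc is not None:
            app.logger.warning("request failed: %r", exc)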
If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1859, "name": "_request_blueprints", "kind": "ref", "category": "function", "info": " for bp in self._request_blueprints():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1865, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " response = self.ensure_sync(handler)(response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1866, "name": "is_null_session", "kind": "ref", "category": "function", "info": " if not self.session_interface.is_null_session(ctx.session):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1867, "name": "save_session", "kind": "ref", "category": "function", "info": " self.session_interface.save_session(self, ctx.session, response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1870, "name": "do_teardown_request", "kind": "def", "category": "function", "info": " def do_teardown_request(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called after the request is dispatched and the response is\n returned, right before the request context is popped.\n\n This calls all functions decorated with\n :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`\n if a blueprint handled the request. Finally, the\n :data:`request_tearing_down` signal is sent.\n\n This is called by\n :meth:`RequestContext.pop() `,\n which may be delayed during testing to maintain access to\n resources.\n\n :param exc: An unhandled exception raised while dispatching the\n request. Detected from the current exception information if\n not passed. Passed to each teardown function.\n\n .. versionchanged:: 0.9\n Added the ``exc`` argument.\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n funcs: t.Iterable[TeardownCallable] = reversed(\n self.teardown_request_funcs[None]\n )\n for bp in self._request_blueprints():\n if bp in self.teardown_request_funcs:\n funcs = chain(funcs, reversed(self.teardown_request_funcs[bp]))\n for func in funcs:\n self.ensure_sync(func)(exc)\n request_tearing_down.send(self, exc=exc)\n\n def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. 
versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. 
This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1898, "name": "_request_blueprints", "kind": "ref", "category": "function", "info": " for bp in self._request_blueprints():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1902, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " self.ensure_sync(func)(exc)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1903, "name": "send", "kind": "ref", "category": "function", "info": " request_tearing_down.send(self, exc=exc)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1905, "name": "do_teardown_appcontext", "kind": "def", "category": "function", "info": " def do_teardown_appcontext(\n self, exc: t.Optional[BaseException] = _sentinel # type: ignore\n ) -> None:\n \"\"\"Called right before the application context is popped.\n\n When handling a request, the application context is popped\n after the request context. See :meth:`do_teardown_request`.\n\n This calls all functions decorated with\n :meth:`teardown_appcontext`. Then the\n :data:`appcontext_tearing_down` signal is sent.\n\n This is called by\n :meth:`AppContext.pop() `.\n\n .. versionadded:: 0.9\n \"\"\"\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n for func in reversed(self.teardown_appcontext_funcs):\n self.ensure_sync(func)(exc)\n appcontext_tearing_down.send(self, exc=exc)\n\n def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. 
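``_request_blueprints`` reverses the dotted blueprint name, so for a nested blueprint the most specific name comes first; a plain-Python sketch of that ordering (``parent.child`` is a hypothetical name)::

    name = "parent.child"
    assert list(reversed(name.split("."))) == ["child", "parent"]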
Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. 
versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1925, "name": "ensure_sync", "kind": "ref", "category": "function", "info": " self.ensure_sync(func)(exc)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1926, "name": "send", "kind": "ref", "category": "function", "info": " appcontext_tearing_down.send(self, exc=exc)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1928, "name": "app_context", "kind": "def", "category": "function", "info": " def app_context(self) -> AppContext:\n \"\"\"Create an :class:`~flask.ctx.AppContext`. Use as a ``with``\n block to push the context, which will make :data:`current_app`\n point at this application.\n\n An application context is automatically pushed by\n :meth:`RequestContext.push() `\n when handling a request, and when running a CLI command. Use\n this to manually create a context outside of these situations.\n\n ::\n\n with app.app_context():\n init_db()\n\n See :doc:`/appcontext`.\n\n .. versionadded:: 0.9\n \"\"\"\n return AppContext(self)\n\n def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. 
This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
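The push/pop shell pattern from the docstring as a complete snippet, with ``try``/``finally`` added so the context is popped even if the body raises::

    from flask import Flask, request

    app = Flask(__name__)

    ctx = app.test_request_context("/console")
    ctx.push()
    try:
        assert request.path == "/console"
    finally:
        ctx.pop()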
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1947, "name": "AppContext", "kind": "ref", "category": "function", "info": " return AppContext(self)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1949, "name": "request_context", "kind": "def", "category": "function", "info": " def request_context(self, environ: dict) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` representing a\n WSGI environment. Use a ``with`` block to push the context,\n which will make :data:`request` point at this request.\n\n See :doc:`/reqcontext`.\n\n Typically you should not call this from your own code. A request\n context is automatically pushed by the :meth:`wsgi_app` when\n handling a request. Use :meth:`test_request_context` to create\n an environment and context instead of this method.\n\n :param environ: a WSGI environment\n \"\"\"\n return RequestContext(self, environ)\n\n def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. 
This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1963, "name": "RequestContext", "kind": "ref", "category": "function", "info": " return RequestContext(self, environ)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 1965, "name": "test_request_context", "kind": "def", "category": "function", "info": " def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:\n \"\"\"Create a :class:`~flask.ctx.RequestContext` for a WSGI\n environment created from the given values. This is mostly useful\n during testing, where you may want to run a function that uses\n request data without dispatching a full request.\n\n See :doc:`/reqcontext`.\n\n Use a ``with`` block to push the context, which will make\n :data:`request` point at the request for the created\n environment. ::\n\n with test_request_context(...):\n generate_report()\n\n When using the shell, it may be easier to push and pop the\n context manually to avoid indentation. ::\n\n ctx = app.test_request_context(...)\n ctx.push()\n ...\n ctx.pop()\n\n Takes the same arguments as Werkzeug's\n :class:`~werkzeug.test.EnvironBuilder`, with some defaults from\n the application. See the linked Werkzeug docs for most of the\n available arguments. Flask-specific behavior is listed here.\n\n :param path: URL path being requested.\n :param base_url: Base URL where the app is being served, which\n ``path`` is relative to. 
If not given, built from\n :data:`PREFERRED_URL_SCHEME`, ``subdomain``,\n :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.\n :param subdomain: Subdomain name to append to\n :data:`SERVER_NAME`.\n :param url_scheme: Scheme to use instead of\n :data:`PREFERRED_URL_SCHEME`.\n :param data: The request body, either as a string or a dict of\n form keys and values.\n :param json: If given, this is serialized as JSON and passed as\n ``data``. Also defaults ``content_type`` to\n ``application/json``.\n :param args: other positional arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n :param kwargs: other keyword arguments passed to\n :class:`~werkzeug.test.EnvironBuilder`.\n \"\"\"\n from .testing import EnvironBuilder\n\n builder = EnvironBuilder(self, *args, **kwargs)\n\n try:\n return self.request_context(builder.get_environ())\n finally:\n builder.close()\n\n def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2014, "name": "EnvironBuilder", "kind": "ref", "category": "function", "info": " builder = EnvironBuilder(self, *args, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2017, "name": "request_context", "kind": "ref", "category": "function", "info": " return self.request_context(builder.get_environ())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2017, "name": "get_environ", "kind": "ref", "category": "function", "info": " return self.request_context(builder.get_environ())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2021, "name": "wsgi_app", "kind": "def", "category": "function", "info": " def wsgi_app(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The actual WSGI application. This is not implemented in\n :meth:`__call__` so that middlewares can be applied without\n losing a reference to the app object. Instead of doing this::\n\n app = MyMiddleware(app)\n\n It's a better idea to do this instead::\n\n app.wsgi_app = MyMiddleware(app.wsgi_app)\n\n Then you still have the original application object around and\n can continue to call methods on it.\n\n .. versionchanged:: 0.7\n Teardown events for the request and app contexts are called\n even if an unhandled error occurs. Other events may not be\n called depending on when an error occurs during dispatch.\n See :ref:`callbacks-and-errors`.\n\n :param environ: A WSGI environment.\n :param start_response: A callable accepting a status code,\n a list of headers, and an optional exception context to\n start the response.\n \"\"\"\n ctx = self.request_context(environ)\n error: t.Optional[BaseException] = None\n try:\n try:\n ctx.push()\n response = self.full_dispatch_request()\n except Exception as e:\n error = e\n response = self.handle_exception(e)\n except: # noqa: B001\n error = sys.exc_info()[1]\n raise\n return response(environ, start_response)\n finally:\n if self.should_ignore_error(error):\n error = None\n ctx.auto_pop(error)\n\n def __call__(self, environ: dict, start_response: t.Callable) -> t.Any:\n \"\"\"The WSGI server calls the Flask application object as the\n WSGI application. 
This calls :meth:`wsgi_app`, which can be\n wrapped to apply middleware.\n \"\"\"\n return self.wsgi_app(environ, start_response)\n\n def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2046, "name": "request_context", "kind": "ref", "category": "function", "info": " ctx = self.request_context(environ)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2050, "name": "push", "kind": "ref", "category": "function", "info": " ctx.push()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2051, "name": "full_dispatch_request", "kind": "ref", "category": "function", "info": " response = self.full_dispatch_request()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2054, "name": "handle_exception", "kind": "ref", "category": "function", "info": " response = self.handle_exception(e)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2058, "name": "response", "kind": "ref", "category": "function", "info": " return response(environ, start_response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2060, "name": "should_ignore_error", "kind": "ref", "category": "function", "info": " if self.should_ignore_error(error):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2062, "name": "auto_pop", "kind": "ref", "category": "function", "info": " ctx.auto_pop(error)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2069, "name": "wsgi_app", "kind": "ref", "category": "function", "info": " return self.wsgi_app(environ, start_response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/app.py", "rel_fname": "src/flask/app.py", "line": 2071, "name": "_request_blueprints", "kind": "def", "category": "function", "info": " def _request_blueprints(self) -> t.Iterable[str]:\n if _request_ctx_stack.top.request.blueprint is None:\n return []\n else:\n return reversed(_request_ctx_stack.top.request.blueprint.split(\".\"))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 24, "name": "BlueprintSetupState", "kind": "def", "category": "class", "info": "__init__\tadd_url_rule"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 76, "name": "add_url_rule", "kind": "def", "category": "function", "info": " def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n **options: t.Any,\n ) -> None:\n \"\"\"A helper method to register a rule (and optionally a view function)\n to the application. 
The endpoint is automatically prefixed with the\n blueprint's name.\n \"\"\"\n if self.url_prefix is not None:\n if rule:\n rule = \"/\".join((self.url_prefix.rstrip(\"/\"), rule.lstrip(\"/\")))\n else:\n rule = self.url_prefix\n options.setdefault(\"subdomain\", self.subdomain)\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n defaults = self.url_defaults\n if \"defaults\" in options:\n defaults = dict(defaults, **options.pop(\"defaults\"))\n self.app.add_url_rule(\n rule,\n f\"{self.name_prefix}{self.blueprint.name}.{endpoint}\",\n view_func,\n defaults=defaults,\n **options,\n )\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 94, "name": "_endpoint_from_view_func", "kind": "ref", "category": "function", "info": " endpoint = _endpoint_from_view_func(view_func) # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 98, "name": "add_url_rule", "kind": "ref", "category": "function", "info": " self.app.add_url_rule(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 107, "name": "Blueprint", "kind": "def", "category": "class", "info": "__init__\t_is_setup_finished\trecord\trecord_once\tmake_setup_state\tregister_blueprint\tregister\tadd_url_rule\tapp_template_filter\tadd_app_template_filter\tapp_template_test\tadd_app_template_test\tapp_template_global\tadd_app_template_global\tbefore_app_request\tbefore_app_first_request\tafter_app_request\tteardown_app_request\tapp_context_processor\tapp_errorhandler\tapp_url_value_preprocessor\tapp_url_defaults"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 202, "name": "_is_setup_finished", "kind": "def", "category": "function", "info": " def _is_setup_finished(self) -> bool:\n return self.warn_on_modifications and self._got_registered_once\n\n def record(self, func: t.Callable) -> None:\n \"\"\"Registers a function that is called when the blueprint is\n registered on the application. This function is called with the\n state as argument as returned by the :meth:`make_setup_state`\n method.\n \"\"\"\n if self._got_registered_once and self.warn_on_modifications:\n from warnings import warn\n\n warn(\n Warning(\n \"The blueprint was already registered once but is\"\n \" getting modified now. These changes will not show\"\n \" up.\"\n )\n )\n self.deferred_functions.append(func)\n\n def record_once(self, func: t.Callable) -> None:\n \"\"\"Works like :meth:`record` but wraps the function in another\n function that will ensure the function is only called once. 
If the\n        blueprint is registered a second time on the application, the\n        function passed is not called.\n        \"\"\"\n\n        def wrapper(state: BlueprintSetupState) -> None:\n            if state.first_registration:\n                func(state)\n\n        return self.record(update_wrapper(wrapper, func))\n\n    def make_setup_state(\n        self, app: \"Flask\", options: dict, first_registration: bool = False\n    ) -> BlueprintSetupState:\n        \"\"\"Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`\n        object that is later passed to the register callback functions.\n        Subclasses can override this to return a subclass of the setup state.\n        \"\"\"\n        return BlueprintSetupState(self, app, options, first_registration)\n\n    def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n        \"\"\"Register a :class:`~flask.Blueprint` on this blueprint. Keyword\n        arguments passed to this method will override the defaults set\n        on the blueprint.\n\n        .. versionadded:: 2.0\n        \"\"\"\n        self._blueprints.append((blueprint, options))\n\n    def register(self, app: \"Flask\", options: dict) -> None:\n        \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n        views and callbacks registered on the blueprint with the\n        application. Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif 
cli_resolved_group is _sentinel:\n self.cli.name = self.name\n app.cli.add_command(self.cli)\n else:\n self.cli.name = cli_resolved_group\n app.cli.add_command(self.cli)\n\n for blueprint, bp_options in self._blueprints:\n url_prefix = options.get(\"url_prefix\", \"\")\n if \"url_prefix\" in bp_options:\n url_prefix = (\n url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n )\n\n bp_options[\"url_prefix\"] = url_prefix\n bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n blueprint.register(app, bp_options)\n\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n **options: t.Any,\n ) -> None:\n \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 205, "name": "record", "kind": "def", "category": "function", "info": " def record(self, func: t.Callable) -> None:\n \"\"\"Registers a function that is called when the blueprint is\n registered on the application. This function is called with the\n state as argument as returned by the :meth:`make_setup_state`\n method.\n \"\"\"\n if self._got_registered_once and self.warn_on_modifications:\n from warnings import warn\n\n warn(\n Warning(\n \"The blueprint was already registered once but is\"\n \" getting modified now. These changes will not show\"\n \" up.\"\n )\n )\n self.deferred_functions.append(func)\n\n def record_once(self, func: t.Callable) -> None:\n \"\"\"Works like :meth:`record` but wraps the function in another\n function that will ensure the function is only called once. If the\n blueprint is registered a second time on the application, the\n function passed is not called.\n \"\"\"\n\n def wrapper(state: BlueprintSetupState) -> None:\n if state.first_registration:\n func(state)\n\n return self.record(update_wrapper(wrapper, func))\n\n def make_setup_state(\n self, app: \"Flask\", options: dict, first_registration: bool = False\n ) -> BlueprintSetupState:\n \"\"\"Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`\n object that is later passed to the register callback functions.\n Subclasses can override this to return a subclass of the setup state.\n \"\"\"\n return BlueprintSetupState(self, app, options, first_registration)\n\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on this blueprint. Keyword\n arguments passed to this method will override the defaults set\n on the blueprint.\n\n .. versionadded:: 2.0\n \"\"\"\n self._blueprints.append((blueprint, options))\n\n def register(self, app: \"Flask\", options: dict) -> None:\n \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n views and callbacks registered on the blueprint with the\n application. 
Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif cli_resolved_group is _sentinel:\n                self.cli.name = self.name\n                app.cli.add_command(self.cli)\n            else:\n                self.cli.name = cli_resolved_group\n                app.cli.add_command(self.cli)\n\n        for blueprint, bp_options in self._blueprints:\n            url_prefix = options.get(\"url_prefix\", \"\")\n            if \"url_prefix\" in bp_options:\n                url_prefix = (\n                    url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n                )\n\n            bp_options[\"url_prefix\"] = url_prefix\n            bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n            blueprint.register(app, bp_options)\n\n    def add_url_rule(\n        self,\n        rule: str,\n        endpoint: t.Optional[str] = None,\n        view_func: t.Optional[t.Callable] = None,\n        **options: t.Any,\n    ) -> None:\n        \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. 
The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 223, "name": "record_once", "kind": "def", "category": "function", "info": " def record_once(self, func: t.Callable) -> None:\n \"\"\"Works like :meth:`record` but wraps the function in another\n function that will ensure the function is only called once. 
If the\n        blueprint is registered a second time on the application, the\n        function passed is not called.\n        \"\"\"\n\n        def wrapper(state: BlueprintSetupState) -> None:\n            if state.first_registration:\n                func(state)\n\n        return self.record(update_wrapper(wrapper, func))\n\n    def make_setup_state(\n        self, app: \"Flask\", options: dict, first_registration: bool = False\n    ) -> BlueprintSetupState:\n        \"\"\"Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`\n        object that is later passed to the register callback functions.\n        Subclasses can override this to return a subclass of the setup state.\n        \"\"\"\n        return BlueprintSetupState(self, app, options, first_registration)\n\n    def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n        \"\"\"Register a :class:`~flask.Blueprint` on this blueprint. Keyword\n        arguments passed to this method will override the defaults set\n        on the blueprint.\n\n        .. versionadded:: 2.0\n        \"\"\"\n        self._blueprints.append((blueprint, options))\n\n    def register(self, app: \"Flask\", options: dict) -> None:\n        \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n        views and callbacks registered on the blueprint with the\n        application. Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif 
cli_resolved_group is _sentinel:\n self.cli.name = self.name\n app.cli.add_command(self.cli)\n else:\n self.cli.name = cli_resolved_group\n app.cli.add_command(self.cli)\n\n for blueprint, bp_options in self._blueprints:\n url_prefix = options.get(\"url_prefix\", \"\")\n if \"url_prefix\" in bp_options:\n url_prefix = (\n url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n )\n\n bp_options[\"url_prefix\"] = url_prefix\n bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n blueprint.register(app, bp_options)\n\n def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n **options: t.Any,\n ) -> None:\n \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 230, "name": "wrapper", "kind": "def", "category": "function", "info": " def wrapper(state: BlueprintSetupState) -> None:\n if state.first_registration:\n func(state)\n\n return self.record(update_wrapper(wrapper, func))\n\n def make_setup_state(\n self, app: \"Flask\", options: dict, first_registration: bool = False\n ) -> BlueprintSetupState:\n \"\"\"Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`\n object that is later passed to the register callback functions.\n Subclasses can override this to return a subclass of the setup state.\n \"\"\"\n return BlueprintSetupState(self, app, options, first_registration)\n\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on this blueprint. Keyword\n arguments passed to this method will override the defaults set\n on the blueprint.\n\n .. versionadded:: 2.0\n \"\"\"\n self._blueprints.append((blueprint, options))\n\n def register(self, app: \"Flask\", options: dict) -> None:\n \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n views and callbacks registered on the blueprint with the\n application. 
Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif cli_resolved_group is _sentinel:\n                self.cli.name = self.name\n                app.cli.add_command(self.cli)\n            else:\n                self.cli.name = cli_resolved_group\n                app.cli.add_command(self.cli)\n\n        for blueprint, bp_options in self._blueprints:\n            url_prefix = options.get(\"url_prefix\", \"\")\n            if \"url_prefix\" in bp_options:\n                url_prefix = (\n                    url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n                )\n\n            bp_options[\"url_prefix\"] = url_prefix\n            bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n            blueprint.register(app, bp_options)\n\n    def add_url_rule(\n        self,\n        rule: str,\n        endpoint: t.Optional[str] = None,\n        view_func: t.Optional[t.Callable] = None,\n        **options: t.Any,\n    ) -> None:\n        \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. 
The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
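Editor's note: the `app_template_filter`/`add_app_template_filter` pair quoted above (and the matching test/global helpers) defer the real work: a `record_once` callback writes the decorated function into `app.jinja_env.filters` only when the blueprint is registered. A small sketch, assuming illustrative names (`bp`, `reverse`):

```python
from flask import Flask, Blueprint

bp = Blueprint("bp", __name__)

@bp.app_template_filter("reverse")
def reverse_filter(s: str) -> str:
    return s[::-1]

app = Flask(__name__)
app.register_blueprint(bp)  # the record_once callback fires here

# The filter was written into the application's Jinja environment.
assert app.jinja_env.filters["reverse"]("abc") == "cba"
```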
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 232, "name": "func", "kind": "ref", "category": "function", "info": " func(state)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 234, "name": "record", "kind": "ref", "category": "function", "info": " return self.record(update_wrapper(wrapper, func))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 236, "name": "make_setup_state", "kind": "def", "category": "function", "info": " def make_setup_state(\n self, app: \"Flask\", options: dict, first_registration: bool = False\n ) -> BlueprintSetupState:\n \"\"\"Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`\n object that is later passed to the register callback functions.\n Subclasses can override this to return a subclass of the setup state.\n \"\"\"\n return BlueprintSetupState(self, app, options, first_registration)\n\n def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on this blueprint. Keyword\n arguments passed to this method will override the defaults set\n on the blueprint.\n\n .. versionadded:: 2.0\n \"\"\"\n self._blueprints.append((blueprint, options))\n\n def register(self, app: \"Flask\", options: dict) -> None:\n \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n views and callbacks registered on the blueprint with the\n application. 
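Editor's note: `app_errorhandler`, shown just above, likewise records a one-time callback that invokes `app.errorhandler(code)` on the application, so the handler fires for matching errors raised anywhere, not only inside the blueprint. A hedged sketch (the handler text and blueprint name are invented):

```python
from flask import Flask, Blueprint

bp = Blueprint("errors", __name__)

@bp.app_errorhandler(404)
def not_found(e):
    return "custom 404", 404

app = Flask(__name__)
app.register_blueprint(bp)

# The handler is installed on the app, so any unknown URL hits it.
resp = app.test_client().get("/definitely-missing")
assert resp.status_code == 404 and b"custom 404" in resp.data
```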
Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/<path:filename>\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif cli_resolved_group is _sentinel:\n                self.cli.name = self.name\n                app.cli.add_command(self.cli)\n            else:\n                self.cli.name = cli_resolved_group\n                app.cli.add_command(self.cli)\n\n        for blueprint, bp_options in self._blueprints:\n            url_prefix = options.get(\"url_prefix\", \"\")\n            if \"url_prefix\" in bp_options:\n                url_prefix = (\n                    url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n                )\n\n            bp_options[\"url_prefix\"] = url_prefix\n            bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n            blueprint.register(app, bp_options)\n\n    def add_url_rule(\n        self,\n        rule: str,\n        endpoint: t.Optional[str] = None,\n        view_func: t.Optional[t.Callable] = None,\n        **options: t.Any,\n    ) -> None:\n        \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. 
The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 243, "name": "BlueprintSetupState", "kind": "ref", "category": "function", "info": " return BlueprintSetupState(self, app, options, first_registration)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 245, "name": "register_blueprint", "kind": "def", "category": "function", "info": " def register_blueprint(self, blueprint: \"Blueprint\", **options: t.Any) -> None:\n \"\"\"Register a :class:`~flask.Blueprint` on this blueprint. Keyword\n arguments passed to this method will override the defaults set\n on the blueprint.\n\n .. versionadded:: 2.0\n \"\"\"\n self._blueprints.append((blueprint, options))\n\n def register(self, app: \"Flask\", options: dict) -> None:\n \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n views and callbacks registered on the blueprint with the\n application. 
Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/<path:filename>\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif cli_resolved_group is _sentinel:\n                self.cli.name = self.name\n                app.cli.add_command(self.cli)\n            else:\n                self.cli.name = cli_resolved_group\n                app.cli.add_command(self.cli)\n\n        for blueprint, bp_options in self._blueprints:\n            url_prefix = options.get(\"url_prefix\", \"\")\n            if \"url_prefix\" in bp_options:\n                url_prefix = (\n                    url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n                )\n\n            bp_options[\"url_prefix\"] = url_prefix\n            bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n            blueprint.register(app, bp_options)\n\n    def add_url_rule(\n        self,\n        rule: str,\n        endpoint: t.Optional[str] = None,\n        view_func: t.Optional[t.Callable] = None,\n        **options: t.Any,\n    ) -> None:\n        \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. 
The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 254, "name": "register", "kind": "def", "category": "function", "info": " def register(self, app: \"Flask\", options: dict) -> None:\n \"\"\"Called by :meth:`Flask.register_blueprint` to register all\n views and callbacks registered on the blueprint with the\n application. 
Creates a :class:`.BlueprintSetupState` and calls\n        each :meth:`record` callback with it.\n\n        :param app: The application this blueprint is being registered\n            with.\n        :param options: Keyword arguments forwarded from\n            :meth:`~Flask.register_blueprint`.\n        :param first_registration: Whether this is the first time this\n            blueprint has been registered on the application.\n        \"\"\"\n        first_registration = False\n\n        if self.name in app.blueprints:\n            assert app.blueprints[self.name] is self, (\n                \"A name collision occurred between blueprints\"\n                f\" {self!r} and {app.blueprints[self.name]!r}.\"\n                f\" Both share the same name {self.name!r}.\"\n                f\" Blueprints that are created on the fly need unique\"\n                f\" names.\"\n            )\n        else:\n            app.blueprints[self.name] = self\n            first_registration = True\n\n        self._got_registered_once = True\n        state = self.make_setup_state(app, options, first_registration)\n\n        if self.has_static_folder:\n            state.add_url_rule(\n                f\"{self.static_url_path}/<path:filename>\",\n                view_func=self.send_static_file,\n                endpoint=\"static\",\n            )\n\n        # Merge blueprint data into parent.\n        if first_registration:\n\n            def extend(bp_dict, parent_dict):\n                for key, values in bp_dict.items():\n                    key = self.name if key is None else f\"{self.name}.{key}\"\n\n                    parent_dict[key].extend(values)\n\n            for key, value in self.error_handler_spec.items():\n                key = self.name if key is None else f\"{self.name}.{key}\"\n                value = defaultdict(\n                    dict,\n                    {\n                        code: {\n                            exc_class: func for exc_class, func in code_values.items()\n                        }\n                        for code, code_values in value.items()\n                    },\n                )\n                app.error_handler_spec[key] = value\n\n            for endpoint, func in self.view_functions.items():\n                app.view_functions[endpoint] = func\n\n            extend(self.before_request_funcs, app.before_request_funcs)\n            extend(self.after_request_funcs, app.after_request_funcs)\n            extend(\n                self.teardown_request_funcs,\n                app.teardown_request_funcs,\n            )\n            extend(self.url_default_functions, app.url_default_functions)\n            extend(self.url_value_preprocessors, app.url_value_preprocessors)\n            extend(self.template_context_processors, app.template_context_processors)\n\n        for deferred in self.deferred_functions:\n            deferred(state)\n\n        cli_resolved_group = options.get(\"cli_group\", self.cli_group)\n\n        if self.cli.commands:\n            if cli_resolved_group is None:\n                app.cli.commands.update(self.cli.commands)\n            elif cli_resolved_group is _sentinel:\n                self.cli.name = self.name\n                app.cli.add_command(self.cli)\n            else:\n                self.cli.name = cli_resolved_group\n                app.cli.add_command(self.cli)\n\n        for blueprint, bp_options in self._blueprints:\n            url_prefix = options.get(\"url_prefix\", \"\")\n            if \"url_prefix\" in bp_options:\n                url_prefix = (\n                    url_prefix.rstrip(\"/\") + \"/\" + bp_options[\"url_prefix\"].lstrip(\"/\")\n                )\n\n            bp_options[\"url_prefix\"] = url_prefix\n            bp_options[\"name_prefix\"] = options.get(\"name_prefix\", \"\") + self.name + \".\"\n            blueprint.register(app, bp_options)\n\n    def add_url_rule(\n        self,\n        rule: str,\n        endpoint: t.Optional[str] = None,\n        view_func: t.Optional[t.Callable] = None,\n        **options: t.Any,\n    ) -> None:\n        \"\"\"Like :meth:`Flask.add_url_rule` but for a blueprint. 
The endpoint for\n the :func:`url_for` function is prefixed with the name of the blueprint.\n \"\"\"\n if endpoint:\n assert \".\" not in endpoint, \"Blueprint endpoints should not contain dots\"\n if view_func and hasattr(view_func, \"__name__\"):\n assert (\n \".\" not in view_func.__name__\n ), \"Blueprint view function name should not contain dots\"\n self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n\n def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 282, "name": "make_setup_state", "kind": "ref", "category": "function", "info": " state = self.make_setup_state(app, options, first_registration)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 285, "name": "add_url_rule", "kind": "ref", "category": "function", "info": " state.add_url_rule(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 327, "name": "deferred", "kind": "ref", "category": "function", "info": " deferred(state)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 336, "name": "add_command", "kind": "ref", "category": "function", "info": " app.cli.add_command(self.cli)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 339, "name": "add_command", "kind": "ref", "category": "function", "info": " app.cli.add_command(self.cli)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 350, "name": "register", "kind": "ref", "category": "function", "info": " blueprint.register(app, bp_options)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 352, "name": "add_url_rule", "kind": "def", "category": "function", "info": " def add_url_rule(\n self,\n rule: str,\n endpoint: t.Optional[str] = None,\n view_func: t.Optional[t.Callable] = None,\n **options: t.Any,\n ) -> None:\n \"\"\"A helper method to register a rule (and optionally a view function)\n to the application. 
The endpoint is automatically prefixed with the\n blueprint's name.\n \"\"\"\n if self.url_prefix is not None:\n if rule:\n rule = \"/\".join((self.url_prefix.rstrip(\"/\"), rule.lstrip(\"/\")))\n else:\n rule = self.url_prefix\n options.setdefault(\"subdomain\", self.subdomain)\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func) # type: ignore\n defaults = self.url_defaults\n if \"defaults\" in options:\n defaults = dict(defaults, **options.pop(\"defaults\"))\n self.app.add_url_rule(\n rule,\n f\"{self.name_prefix}{self.blueprint.name}.{endpoint}\",\n view_func,\n defaults=defaults,\n **options,\n )\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 368, "name": "record", "kind": "ref", "category": "function", "info": " self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 368, "name": "add_url_rule", "kind": "ref", "category": "function", "info": " self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 370, "name": "app_template_filter", "kind": "def", "category": "function", "info": " def app_template_filter(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.template_filter` but for a blueprint.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
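Editor's note: `BlueprintSetupState.add_url_rule`, quoted a little earlier, is where the prefix logic actually lives: the rule is joined as `url_prefix.rstrip("/") + "/" + rule.lstrip("/")` and the endpoint is dotted with the blueprint name (plus any `name_prefix` from nesting). A sketch of the resulting behavior, with illustrative names:

```python
from flask import Flask, Blueprint, url_for

api = Blueprint("api", __name__, url_prefix="/api/")

@api.route("/users")  # redundant slashes are normalized by the join
def users():
    return "[]"

app = Flask(__name__)
app.register_blueprint(api)

with app.test_request_context():
    # Endpoint is prefixed with the blueprint name; rule is "/api/users".
    assert url_for("api.users") == "/api/users"
```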
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 378, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: TemplateFilterCallable) -> TemplateFilterCallable:\n self.add_app_template_filter(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. 
Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 379, "name": "add_app_template_filter", "kind": "ref", "category": "function", "info": " self.add_app_template_filter(f, name=name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 384, "name": "add_app_template_filter", "kind": "def", "category": "function", "info": " def add_app_template_filter(\n self, f: TemplateFilterCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template filter, available application wide. Like\n :meth:`Flask.add_template_filter` but for a blueprint. Works exactly\n like the :meth:`app_template_filter` decorator.\n\n :param name: the optional name of the filter, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 395, "name": "register_template", "kind": "def", "category": "function", "info": " def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. 
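Editor's note: `before_app_request`, whose body appears above, appends the hook under the `None` key of `app.before_request_funcs`, i.e. the application-wide bucket, so it runs for requests handled outside the blueprint too (the sibling `after_app_request`/`teardown_app_request` helpers work the same way). A minimal sketch with invented names:

```python
from flask import Flask, Blueprint, g

bp = Blueprint("hooks", __name__)

@bp.before_app_request
def stamp():
    g.stamped = True

app = Flask(__name__)

@app.route("/")  # defined on the app, outside the blueprint
def index():
    return "yes" if g.get("stamped") else "no"

app.register_blueprint(bp)

# The hook runs even for this non-blueprint route.
assert app.test_client().get("/").data == b"yes"
```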
Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 398, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(register_template)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 400, "name": "app_template_test", "kind": "def", "category": "function", "info": " def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. 
Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 410, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. 
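Each of the request-lifecycle helpers quoted in these records defers a one-line lambda that appends the function to the application's hook dict under the `None` key, which Flask treats as "applies to every blueprint". A short sketch of the effect (hook names are illustrative):

```python
from flask import Blueprint, Flask

bp = Blueprint("hooks", __name__)

@bp.before_app_request
def check_auth():
    # Runs before every request on the whole app, not just bp's routes.
    pass

@bp.after_app_request
def add_header(response):
    # After-request hooks receive and must return the response object.
    response.headers["X-Seen-By"] = "hooks"
    return response

app = Flask(__name__)
app.register_blueprint(bp)

# The setdefault(None, []).append(f) lambdas filed both hooks under the
# app-wide None key rather than under the blueprint's own name.
assert check_auth in app.before_request_funcs[None]
assert add_header in app.after_request_funcs[None]
```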
Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 411, "name": "add_app_template_test", "kind": "ref", "category": "function", "info": " self.add_app_template_test(f, name=name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 416, "name": "add_app_template_test", "kind": "def", "category": "function", "info": " def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 429, "name": "register_template", "kind": "def", "category": "function", "info": " def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. 
Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 432, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(register_template)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 434, "name": "app_template_global", "kind": "def", "category": "function", "info": " def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
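`app_errorhandler` is the odd one out among these helpers: rather than appending to a dict of functions, its deferred callback replays the decorator on the real application via `s.app.errorhandler(code)(f)` at registration time. Roughly, from the user's side (the route and JSON payload are made up):

```python
from flask import Blueprint, Flask, jsonify

bp = Blueprint("errors", __name__)

@bp.app_errorhandler(404)
def not_found(e):
    # Handles 404s raised anywhere in the app, not only under this blueprint.
    return jsonify(error="not found"), 404

app = Flask(__name__)
app.register_blueprint(bp)

resp = app.test_client().get("/does-not-exist")
print(resp.status_code, resp.get_json())  # 404 {'error': 'not found'}
```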
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 444, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. 
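A context processor registered through `app_context_processor` contributes to the template context of every render in the application, not just renders initiated by the blueprint. A minimal sketch (the injected `site` value is invented):

```python
from flask import Blueprint, Flask, render_template_string

bp = Blueprint("ctx", __name__)

@bp.app_context_processor
def inject_site():
    # The returned dict is merged into every template's context, app-wide.
    return {"site": "example.org"}

app = Flask(__name__)
app.register_blueprint(bp)

@app.route("/")
def index():
    # "site" comes from the blueprint's processor, not from this view.
    return render_template_string("welcome to {{ site }}")

print(app.test_client().get("/").get_data(as_text=True))
# -> welcome to example.org
```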
Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 445, "name": "add_app_template_global", "kind": "ref", "category": "function", "info": " self.add_app_template_global(f, name=name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 450, "name": "add_app_template_global", "kind": "def", "category": "function", "info": " def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. 
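Teardown handlers differ from after-request handlers in two ways visible in the docstrings above: they run even when the view raised, and they receive the unhandled exception (or `None`) instead of the response. A sketch, assuming an invented resource to release:

```python
from flask import Blueprint, Flask

bp = Blueprint("cleanup", __name__)

@bp.teardown_app_request
def release(exc):
    # Runs at the end of every request, error or not; exc is the unhandled
    # exception, or None on success. The return value is ignored.
    if exc is not None:
        print(f"request failed: {exc!r}")

app = Flask(__name__)
app.register_blueprint(bp)

@app.route("/ok")
def ok():
    return "fine"

app.test_client().get("/ok")  # release(None) runs after the response
```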
Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 463, "name": "register_template", "kind": "def", "category": "function", "info": " def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.filters[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_test(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.template_test` but for a blueprint.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateTestCallable) -> TemplateTestCallable:\n self.add_app_template_test(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_test(\n self, f: TemplateTestCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template test, available application wide. Like\n :meth:`Flask.add_template_test` but for a blueprint. Works exactly\n like the :meth:`app_template_test` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the test, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.tests[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def app_template_global(self, name: t.Optional[str] = None) -> t.Callable:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.template_global` but for a blueprint.\n\n .. 
versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def decorator(f: TemplateGlobalCallable) -> TemplateGlobalCallable:\n self.add_app_template_global(f, name=name)\n return f\n\n return decorator\n\n def add_app_template_global(\n self, f: TemplateGlobalCallable, name: t.Optional[str] = None\n ) -> None:\n \"\"\"Register a custom template global, available application wide. Like\n :meth:`Flask.add_template_global` but for a blueprint. Works exactly\n like the :meth:`app_template_global` decorator.\n\n .. versionadded:: 0.10\n\n :param name: the optional name of the global, otherwise the\n function name will be used.\n \"\"\"\n\n def register_template(state: BlueprintSetupState) -> None:\n state.app.jinja_env.globals[name or f.__name__] = f\n\n self.record_once(register_template)\n\n def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 466, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(register_template)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 468, "name": "before_app_request", "kind": "def", "category": "function", "info": " def before_app_request(self, f: BeforeRequestCallable) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_request`. Such a function is executed\n before each request, even if outside of a blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 472, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 477, "name": "before_app_first_request", "kind": "def", "category": "function", "info": " def before_app_first_request(\n self, f: BeforeRequestCallable\n ) -> BeforeRequestCallable:\n \"\"\"Like :meth:`Flask.before_first_request`. Such a function is\n executed before the first request to the application.\n \"\"\"\n self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n return f\n\n def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
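Note that `before_app_first_request` appends to a plain list rather than a `None`-keyed dict, because such hooks are app-global by definition and run exactly once. In the Flask version indexed here the observable behavior is (names illustrative):

```python
from flask import Blueprint, Flask

bp = Blueprint("boot", __name__)
calls = []

@bp.before_app_first_request
def warm_up():
    # Executed once, before the first request the application handles.
    calls.append("warmed")

app = Flask(__name__)
app.register_blueprint(bp)

@app.route("/ping")
def ping():
    return "pong"

client = app.test_client()
client.get("/ping")
client.get("/ping")
print(calls)  # ['warmed'] -- the second request did not trigger it again
```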
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 483, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(lambda s: s.app.before_first_request_funcs.append(f))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 486, "name": "after_app_request", "kind": "def", "category": "function", "info": " def after_app_request(self, f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Like :meth:`Flask.after_request` but for a blueprint. Such a function\n is executed after each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 490, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 495, "name": "teardown_app_request", "kind": "def", "category": "function", "info": " def teardown_app_request(self, f: TeardownCallable) -> TeardownCallable:\n \"\"\"Like :meth:`Flask.teardown_request` but for a blueprint. Such a\n function is executed when tearing down each request, even if outside of\n the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)\n )\n return f\n\n def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 500, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 505, "name": "app_context_processor", "kind": "def", "category": "function", "info": " def app_context_processor(\n self, f: TemplateContextProcessorCallable\n ) -> TemplateContextProcessorCallable:\n \"\"\"Like :meth:`Flask.context_processor` but for a blueprint. Such a\n function is executed each request, even if outside of the blueprint.\n \"\"\"\n self.record_once(\n lambda s: s.app.template_context_processors.setdefault(None, []).append(f)\n )\n return f\n\n def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 511, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 516, "name": "app_errorhandler", "kind": "def", "category": "function", "info": " def app_errorhandler(self, code: t.Union[t.Type[Exception], int]) -> t.Callable:\n \"\"\"Like :meth:`Flask.errorhandler` but for a blueprint. 
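The final pair of records, `app_url_value_preprocessor` and `app_url_defaults`, are typically used together, for instance to strip a language code out of every matched URL and re-inject it when building URLs. A sketch under that assumption (endpoint and names invented; a production app would usually also consult `url_map.is_endpoint_expecting` in the defaults hook):

```python
from flask import Blueprint, Flask, g

bp = Blueprint("i18n", __name__)

@bp.app_url_value_preprocessor
def pull_lang(endpoint, values):
    # Pops lang_code from the matched URL values before the view is called,
    # so views do not need a lang_code parameter of their own.
    g.lang_code = values.pop("lang_code", None) if values else None

@bp.app_url_defaults
def push_lang(endpoint, values):
    # Re-injects the current language when url_for() builds a URL.
    if "lang_code" not in values and hasattr(g, "lang_code"):
        values["lang_code"] = g.lang_code

app = Flask(__name__)
app.register_blueprint(bp)

@app.route("/<lang_code>/about")
def about():
    return f"about ({g.lang_code})"

print(app.test_client().get("/de/about").get_data(as_text=True))  # about (de)
```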
This\n handler is used for all requests, even if outside of the blueprint.\n \"\"\"\n\n def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 521, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f: ErrorHandlerCallable) -> ErrorHandlerCallable:\n self.record_once(lambda s: s.app.errorhandler(code)(f))\n return f\n\n return decorator\n\n def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 522, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(lambda s: s.app.errorhandler(code)(f))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 522, "name": "errorhandler", "kind": "ref", "category": "function", "info": " self.record_once(lambda s: s.app.errorhandler(code)(f))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 527, "name": "app_url_value_preprocessor", "kind": "def", "category": "function", "info": " def app_url_value_preprocessor(\n self, f: URLValuePreprocessorCallable\n ) -> URLValuePreprocessorCallable:\n \"\"\"Same as :meth:`url_value_preprocessor` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)\n )\n return f\n\n def app_url_defaults(self, f: URLDefaultCallable) -> URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 531, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 536, "name": "app_url_defaults", "kind": "def", "category": "function", "info": " def app_url_defaults(self, f: URLDefaultCallable) -> 
URLDefaultCallable:\n \"\"\"Same as :meth:`url_defaults` but application wide.\"\"\"\n self.record_once(\n lambda s: s.app.url_default_functions.setdefault(None, []).append(f)\n )\n return f\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/blueprints.py", "rel_fname": "src/flask/blueprints.py", "line": 538, "name": "record_once", "kind": "ref", "category": "function", "info": " self.record_once(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 32, "name": "NoAppException", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 36, "name": "find_best_app", "kind": "def", "category": "function", "info": "def find_best_app(script_info, module):\n \"\"\"Given a module instance this tries to find the best possible\n application in the module or raises an exception.\n \"\"\"\n from . import Flask\n\n # Search for the most common names first.\n for attr_name in (\"app\", \"application\"):\n app = getattr(module, attr_name, None)\n\n if isinstance(app, Flask):\n return app\n\n # Otherwise find the only object that is a Flask instance.\n matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]\n\n if len(matches) == 1:\n return matches[0]\n elif len(matches) > 1:\n raise NoAppException(\n \"Detected multiple Flask applications in module\"\n f\" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'\"\n f\" to specify the correct one.\"\n )\n\n # Search for app factory functions.\n for attr_name in (\"create_app\", \"make_app\"):\n app_factory = getattr(module, attr_name, None)\n\n if inspect.isfunction(app_factory):\n try:\n app = call_factory(script_info, app_factory)\n\n if isinstance(app, Flask):\n return app\n except TypeError:\n if not _called_with_wrong_args(app_factory):\n raise\n raise NoAppException(\n f\"Detected factory {attr_name!r} in module {module.__name__!r},\"\n \" but could not call it without arguments. Use\"\n f\" \\\"FLASK_APP='{module.__name__}:{attr_name}(args)'\\\"\"\n \" to specify arguments.\"\n )\n\n raise NoAppException(\n \"Failed to find Flask application or factory in module\"\n f\" {module.__name__!r}. 
Use 'FLASK_APP={module.__name__}:name'\"\n        \" to specify one.\"\n    )\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 55, "name": "NoAppException", "kind": "ref", "category": "function", "info": "        raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 67, "name": "call_factory", "kind": "ref", "category": "function", "info": "                app = call_factory(script_info, app_factory)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 72, "name": "_called_with_wrong_args", "kind": "ref", "category": "function", "info": "                if not _called_with_wrong_args(app_factory):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 74, "name": "NoAppException", "kind": "ref", "category": "function", "info": "                raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 81, "name": "NoAppException", "kind": "ref", "category": "function", "info": "    raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 88, "name": "call_factory", "kind": "def", "category": "function", "info": "def call_factory(script_info, app_factory, args=None, kwargs=None):\n    \"\"\"Takes an app factory, a ``script_info`` object and optionally a tuple\n    of arguments. Checks for the existence of a script_info argument and calls\n    the app_factory depending on that and the arguments provided.\n    \"\"\"\n    sig = inspect.signature(app_factory)\n    args = [] if args is None else args\n    kwargs = {} if kwargs is None else kwargs\n\n    if \"script_info\" in sig.parameters:\n        warnings.warn(\n            \"The 'script_info' argument is deprecated and will not be\"\n            \" passed to the app factory function in Flask 2.1.\",\n            DeprecationWarning,\n        )\n        kwargs[\"script_info\"] = script_info\n\n    if (\n        not args\n        and len(sig.parameters) == 1\n        and next(iter(sig.parameters.values())).default is inspect.Parameter.empty\n    ):\n        warnings.warn(\n            \"Script info is deprecated and will not be passed as the\"\n            \" single argument to the app factory function in Flask\"\n            \" 2.1.\",\n            DeprecationWarning,\n        )\n        args.append(script_info)\n\n    return app_factory(*args, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 118, "name": "app_factory", "kind": "ref", "category": "function", "info": "    return app_factory(*args, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 121, "name": "_called_with_wrong_args", "kind": "def", "category": "function", "info": "def _called_with_wrong_args(f):\n    \"\"\"Check whether calling a function raised a ``TypeError`` because\n    the call failed or because something in the factory raised the\n    error.\n\n    :param f: The function that was called.\n    :return: ``True`` if the call failed.\n    \"\"\"\n    tb = sys.exc_info()[2]\n\n    try:\n        while tb is not None:\n            if tb.tb_frame.f_code is f.__code__:\n                # In the function, it was called successfully.\n                return False\n\n            tb = tb.tb_next\n\n        # Didn't reach the function.\n        return True\n    finally:\n        # Delete tb to break a circular reference.\n        # 
https://docs.python.org/2/library/sys.html#sys.exc_info\n del tb\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 147, "name": "find_app_by_string", "kind": "def", "category": "function", "info": "def find_app_by_string(script_info, module, app_name):\n \"\"\"Check if the given string is a variable name or a function. Call\n a function to get the app instance, or return the variable directly.\n \"\"\"\n from . import Flask\n\n # Parse app_name as a single expression to determine if it's a valid\n # attribute name or function call.\n try:\n expr = ast.parse(app_name.strip(), mode=\"eval\").body\n except SyntaxError:\n raise NoAppException(\n f\"Failed to parse {app_name!r} as an attribute name or function call.\"\n )\n\n if isinstance(expr, ast.Name):\n name = expr.id\n args = kwargs = None\n elif isinstance(expr, ast.Call):\n # Ensure the function name is an attribute name only.\n if not isinstance(expr.func, ast.Name):\n raise NoAppException(\n f\"Function reference must be a simple name: {app_name!r}.\"\n )\n\n name = expr.func.id\n\n # Parse the positional and keyword arguments as literals.\n try:\n args = [ast.literal_eval(arg) for arg in expr.args]\n kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}\n except ValueError:\n # literal_eval gives cryptic error messages, show a generic\n # message with the full expression instead.\n raise NoAppException(\n f\"Failed to parse arguments as literal values: {app_name!r}.\"\n )\n else:\n raise NoAppException(\n f\"Failed to parse {app_name!r} as an attribute name or function call.\"\n )\n\n try:\n attr = getattr(module, name)\n except AttributeError:\n raise NoAppException(\n f\"Failed to find attribute {name!r} in {module.__name__!r}.\"\n )\n\n # If the attribute is a function, call it with any args and kwargs\n # to get the real application.\n if inspect.isfunction(attr):\n try:\n app = call_factory(script_info, attr, args, kwargs)\n except TypeError:\n if not _called_with_wrong_args(attr):\n raise\n\n raise NoAppException(\n f\"The factory {app_name!r} in module\"\n f\" {module.__name__!r} could not be called with the\"\n \" specified arguments.\"\n )\n else:\n app = attr\n\n if isinstance(app, Flask):\n return app\n\n raise NoAppException(\n \"A valid Flask application was not obtained from\"\n f\" '{module.__name__}:{app_name}'.\"\n )\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 158, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 168, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 181, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 185, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 192, "name": "NoAppException", "kind": "ref", "category": 
"function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 200, "name": "call_factory", "kind": "ref", "category": "function", "info": " app = call_factory(script_info, attr, args, kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 202, "name": "_called_with_wrong_args", "kind": "ref", "category": "function", "info": " if not _called_with_wrong_args(attr):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 205, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 216, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 222, "name": "prepare_import", "kind": "def", "category": "function", "info": "def prepare_import(path):\n \"\"\"Given a filename this will try to calculate the python path, add it\n to the search path and return the actual module name that is expected.\n \"\"\"\n path = os.path.realpath(path)\n\n fname, ext = os.path.splitext(path)\n if ext == \".py\":\n path = fname\n\n if os.path.basename(path) == \"__init__\":\n path = os.path.dirname(path)\n\n module_name = []\n\n # move up until outside package structure (no __init__.py)\n while True:\n path, name = os.path.split(path)\n module_name.append(name)\n\n if not os.path.exists(os.path.join(path, \"__init__.py\")):\n break\n\n if sys.path[0] != path:\n sys.path.insert(0, path)\n\n return \".\".join(module_name[::-1])\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 226, "name": "realpath", "kind": "ref", "category": "function", "info": " path = os.path.realpath(path)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 228, "name": "splitext", "kind": "ref", "category": "function", "info": " fname, ext = os.path.splitext(path)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 232, "name": "basename", "kind": "ref", "category": "function", "info": " if os.path.basename(path) == \"__init__\":\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 233, "name": "dirname", "kind": "ref", "category": "function", "info": " path = os.path.dirname(path)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 242, "name": "exists", "kind": "ref", "category": "function", "info": " if not os.path.exists(os.path.join(path, \"__init__.py\")):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 251, "name": "locate_app", "kind": "def", "category": "function", "info": "def locate_app(script_info, module_name, app_name, raise_if_not_found=True):\n __traceback_hide__ = True # noqa: F841\n\n try:\n __import__(module_name)\n except ImportError:\n # Reraise the 
ImportError if it occurred within the imported module.\n # Determine this by checking whether the trace has a depth > 1.\n if sys.exc_info()[2].tb_next:\n raise NoAppException(\n f\"While importing {module_name!r}, an ImportError was\"\n f\" raised:\\n\\n{traceback.format_exc()}\"\n )\n elif raise_if_not_found:\n raise NoAppException(f\"Could not import {module_name!r}.\")\n else:\n return\n\n module = sys.modules[module_name]\n\n if app_name is None:\n return find_best_app(script_info, module)\n else:\n return find_app_by_string(script_info, module, app_name)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 260, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 265, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(f\"Could not import {module_name!r}.\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 272, "name": "find_best_app", "kind": "ref", "category": "function", "info": " return find_best_app(script_info, module)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 274, "name": "find_app_by_string", "kind": "ref", "category": "function", "info": " return find_app_by_string(script_info, module, app_name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 277, "name": "get_version", "kind": "def", "category": "function", "info": "def get_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n\n import werkzeug\n from . 
import __version__\n\n click.echo(\n f\"Python {platform.python_version()}\\n\"\n f\"Flask {__version__}\\n\"\n f\"Werkzeug {werkzeug.__version__}\",\n color=ctx.color,\n )\n ctx.exit()\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 303, "name": "DispatchingApp", "kind": "def", "category": "class", "info": "__init__\t_load_in_background\t_flush_bg_loading_exception\t_load_unlocked\t__call__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 320, "name": "_load_unlocked", "kind": "ref", "category": "function", "info": " self._load_unlocked()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 322, "name": "_load_in_background", "kind": "ref", "category": "function", "info": " self._load_in_background()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 324, "name": "_load_in_background", "kind": "def", "category": "function", "info": " def _load_in_background(self):\n def _load_app():\n __traceback_hide__ = True # noqa: F841\n with self._lock:\n try:\n self._load_unlocked()\n except Exception:\n self._bg_loading_exc_info = sys.exc_info()\n\n t = Thread(target=_load_app, args=())\n t.start()\n\n def _flush_bg_loading_exception(self):\n __traceback_hide__ = True # noqa: F841\n exc_info = self._bg_loading_exc_info\n if exc_info is not None:\n self._bg_loading_exc_info = None\n raise exc_info\n\n def _load_unlocked(self):\n __traceback_hide__ = True # noqa: F841\n self._app = rv = self.loader()\n self._bg_loading_exc_info = None\n return rv\n\n def __call__(self, environ, start_response):\n __traceback_hide__ = True # noqa: F841\n if self._app is not None:\n return self._app(environ, start_response)\n self._flush_bg_loading_exception()\n with self._lock:\n if self._app is not None:\n rv = self._app\n else:\n rv = self._load_unlocked()\n return rv(environ, start_response)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 325, "name": "_load_app", "kind": "def", "category": "function", "info": " def _load_app():\n __traceback_hide__ = True # noqa: F841\n with self._lock:\n try:\n self._load_unlocked()\n except Exception:\n self._bg_loading_exc_info = sys.exc_info()\n\n t = Thread(target=_load_app, args=())\n t.start()\n\n def _flush_bg_loading_exception(self):\n __traceback_hide__ = True # noqa: F841\n exc_info = self._bg_loading_exc_info\n if exc_info is not None:\n self._bg_loading_exc_info = None\n raise exc_info\n\n def _load_unlocked(self):\n __traceback_hide__ = True # noqa: F841\n self._app = rv = self.loader()\n self._bg_loading_exc_info = None\n return rv\n\n def __call__(self, environ, start_response):\n __traceback_hide__ = True # noqa: F841\n if self._app is not None:\n return self._app(environ, start_response)\n self._flush_bg_loading_exception()\n with self._lock:\n if self._app is not None:\n rv = self._app\n else:\n rv = self._load_unlocked()\n return rv(environ, start_response)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 329, "name": "_load_unlocked", "kind": "ref", "category": "function", "info": " self._load_unlocked()\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 336, "name": "_flush_bg_loading_exception", "kind": "def", "category": "function", "info": " def _flush_bg_loading_exception(self):\n __traceback_hide__ = True # noqa: F841\n exc_info = self._bg_loading_exc_info\n if exc_info is not None:\n self._bg_loading_exc_info = None\n raise exc_info\n\n def _load_unlocked(self):\n __traceback_hide__ = True # noqa: F841\n self._app = rv = self.loader()\n self._bg_loading_exc_info = None\n return rv\n\n def __call__(self, environ, start_response):\n __traceback_hide__ = True # noqa: F841\n if self._app is not None:\n return self._app(environ, start_response)\n self._flush_bg_loading_exception()\n with self._lock:\n if self._app is not None:\n rv = self._app\n else:\n rv = self._load_unlocked()\n return rv(environ, start_response)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 343, "name": "_load_unlocked", "kind": "def", "category": "function", "info": " def _load_unlocked(self):\n __traceback_hide__ = True # noqa: F841\n self._app = rv = self.loader()\n self._bg_loading_exc_info = None\n return rv\n\n def __call__(self, environ, start_response):\n __traceback_hide__ = True # noqa: F841\n if self._app is not None:\n return self._app(environ, start_response)\n self._flush_bg_loading_exception()\n with self._lock:\n if self._app is not None:\n rv = self._app\n else:\n rv = self._load_unlocked()\n return rv(environ, start_response)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 345, "name": "loader", "kind": "ref", "category": "function", "info": " self._app = rv = self.loader()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 352, "name": "_app", "kind": "ref", "category": "function", "info": " return self._app(environ, start_response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 353, "name": "_flush_bg_loading_exception", "kind": "ref", "category": "function", "info": " self._flush_bg_loading_exception()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 358, "name": "_load_unlocked", "kind": "ref", "category": "function", "info": " rv = self._load_unlocked()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 359, "name": "rv", "kind": "ref", "category": "function", "info": " return rv(environ, start_response)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 362, "name": "ScriptInfo", "kind": "def", "category": "class", "info": "__init__\tload_app"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 383, "name": "load_app", "kind": "def", "category": "function", "info": " def load_app(self):\n \"\"\"Loads the Flask app (if not yet loaded) and returns it. 
Calling\n this multiple times will just result in the already loaded app to\n be returned.\n \"\"\"\n __traceback_hide__ = True # noqa: F841\n\n if self._loaded_app is not None:\n return self._loaded_app\n\n if self.create_app is not None:\n app = call_factory(self, self.create_app)\n else:\n if self.app_import_path:\n path, name = (\n re.split(r\":(?![\\\\/])\", self.app_import_path, 1) + [None]\n )[:2]\n import_name = prepare_import(path)\n app = locate_app(self, import_name, name)\n else:\n for path in (\"wsgi.py\", \"app.py\"):\n import_name = prepare_import(path)\n app = locate_app(self, import_name, None, raise_if_not_found=False)\n\n if app:\n break\n\n if not app:\n raise NoAppException(\n \"Could not locate a Flask application. You did not provide \"\n 'the \"FLASK_APP\" environment variable, and a \"wsgi.py\" or '\n '\"app.py\" module was not found in the current directory.'\n )\n\n if self.set_debug_flag:\n # Update the app's debug flag through the descriptor so that\n # other values repopulate as well.\n app.debug = get_debug_flag()\n\n self._loaded_app = app\n return app\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 394, "name": "call_factory", "kind": "ref", "category": "function", "info": " app = call_factory(self, self.create_app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 400, "name": "prepare_import", "kind": "ref", "category": "function", "info": " import_name = prepare_import(path)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 401, "name": "locate_app", "kind": "ref", "category": "function", "info": " app = locate_app(self, import_name, name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 404, "name": "prepare_import", "kind": "ref", "category": "function", "info": " import_name = prepare_import(path)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 405, "name": "locate_app", "kind": "ref", "category": "function", "info": " app = locate_app(self, import_name, None, raise_if_not_found=False)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 411, "name": "NoAppException", "kind": "ref", "category": "function", "info": " raise NoAppException(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 420, "name": "get_debug_flag", "kind": "ref", "category": "function", "info": " app.debug = get_debug_flag()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 429, "name": "with_appcontext", "kind": "def", "category": "function", "info": "def with_appcontext(f):\n \"\"\"Wraps a callback so that it's guaranteed to be executed with the\n script's application context. 
If callbacks are registered directly\n to the ``app.cli`` object then they are wrapped with this function\n by default unless it's disabled.\n \"\"\"\n\n @click.pass_context\n def decorator(__ctx, *args, **kwargs):\n with __ctx.ensure_object(ScriptInfo).load_app().app_context():\n return __ctx.invoke(f, *args, **kwargs)\n\n return update_wrapper(decorator, f)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 437, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(__ctx, *args, **kwargs):\n with __ctx.ensure_object(ScriptInfo).load_app().app_context():\n return __ctx.invoke(f, *args, **kwargs)\n\n return update_wrapper(decorator, f)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 438, "name": "ensure_object", "kind": "ref", "category": "function", "info": " with __ctx.ensure_object(ScriptInfo).load_app().app_context():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 438, "name": "load_app", "kind": "ref", "category": "function", "info": " with __ctx.ensure_object(ScriptInfo).load_app().app_context():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 438, "name": "app_context", "kind": "ref", "category": "function", "info": " with __ctx.ensure_object(ScriptInfo).load_app().app_context():\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 439, "name": "invoke", "kind": "ref", "category": "function", "info": " return __ctx.invoke(f, *args, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 444, "name": "AppGroup", "kind": "def", "category": "class", "info": "command\tgroup"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 459, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(f):\n if wrap_for_ctx:\n f = with_appcontext(f)\n return click.Group.command(self, *args, **kwargs)(f)\n\n return decorator\n\n def group(self, *args, **kwargs):\n \"\"\"This works exactly like the method of the same name on a regular\n :class:`click.Group` but it defaults the group class to\n :class:`AppGroup`.\n \"\"\"\n kwargs.setdefault(\"cls\", AppGroup)\n return click.Group.group(self, *args, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 461, "name": "with_appcontext", "kind": "ref", "category": "function", "info": " f = with_appcontext(f)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 475, "name": "FlaskGroup", "kind": "def", "category": "class", "info": "__init__\t_load_plugin_commands\tget_command\tlist_commands\tmain"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 518, "name": "add_command", "kind": "ref", "category": "function", "info": " self.add_command(run_command)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 519, "name": "add_command", 
"kind": "ref", "category": "function", "info": " self.add_command(shell_command)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 520, "name": "add_command", "kind": "ref", "category": "function", "info": " self.add_command(routes_command)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 524, "name": "_load_plugin_commands", "kind": "def", "category": "function", "info": " def _load_plugin_commands(self):\n if self._loaded_plugin_commands:\n return\n try:\n import pkg_resources\n except ImportError:\n self._loaded_plugin_commands = True\n return\n\n for ep in pkg_resources.iter_entry_points(\"flask.commands\"):\n self.add_command(ep.load(), ep.name)\n self._loaded_plugin_commands = True\n\n def get_command(self, ctx, name):\n self._load_plugin_commands()\n # Look up built-in and plugin commands, which should be\n # available even if the app fails to load.\n rv = super().get_command(ctx, name)\n\n if rv is not None:\n return rv\n\n info = ctx.ensure_object(ScriptInfo)\n\n # Look up commands provided by the app, showing an error and\n # continuing if the app couldn't be loaded.\n try:\n return info.load_app().cli.get_command(ctx, name)\n except NoAppException as e:\n click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n\n def list_commands(self, ctx):\n self._load_plugin_commands()\n # Start with the built-in and plugin commands.\n rv = set(super().list_commands(ctx))\n info = ctx.ensure_object(ScriptInfo)\n\n # Add commands provided by the app, showing an error and\n # continuing if the app couldn't be loaded.\n try:\n rv.update(info.load_app().cli.list_commands(ctx))\n except NoAppException as e:\n # When an app couldn't be loaded, show the error message\n # without the traceback.\n click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n except Exception:\n # When any other errors occurred during loading, show the\n # full traceback.\n click.secho(f\"{traceback.format_exc()}\\n\", err=True, fg=\"red\")\n\n return sorted(rv)\n\n def main(self, *args, **kwargs):\n # Set a global flag that indicates that we were invoked from the\n # command line interface. This is detected by Flask.run to make the\n # call into a no-op. 
This is necessary to avoid ugly errors when the\n # script that is loaded here also attempts to start a server.\n os.environ[\"FLASK_RUN_FROM_CLI\"] = \"true\"\n\n if get_load_dotenv(self.load_dotenv):\n load_dotenv()\n\n obj = kwargs.get(\"obj\")\n\n if obj is None:\n obj = ScriptInfo(\n create_app=self.create_app, set_debug_flag=self.set_debug_flag\n )\n\n kwargs[\"obj\"] = obj\n kwargs.setdefault(\"auto_envvar_prefix\", \"FLASK\")\n return super().main(*args, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 534, "name": "add_command", "kind": "ref", "category": "function", "info": " self.add_command(ep.load(), ep.name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 534, "name": "load", "kind": "ref", "category": "function", "info": " self.add_command(ep.load(), ep.name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 537, "name": "get_command", "kind": "def", "category": "function", "info": " def get_command(self, ctx, name):\n self._load_plugin_commands()\n # Look up built-in and plugin commands, which should be\n # available even if the app fails to load.\n rv = super().get_command(ctx, name)\n\n if rv is not None:\n return rv\n\n info = ctx.ensure_object(ScriptInfo)\n\n # Look up commands provided by the app, showing an error and\n # continuing if the app couldn't be loaded.\n try:\n return info.load_app().cli.get_command(ctx, name)\n except NoAppException as e:\n click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n\n def list_commands(self, ctx):\n self._load_plugin_commands()\n # Start with the built-in and plugin commands.\n rv = set(super().list_commands(ctx))\n info = ctx.ensure_object(ScriptInfo)\n\n # Add commands provided by the app, showing an error and\n # continuing if the app couldn't be loaded.\n try:\n rv.update(info.load_app().cli.list_commands(ctx))\n except NoAppException as e:\n # When an app couldn't be loaded, show the error message\n # without the traceback.\n click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n except Exception:\n # When any other errors occurred during loading, show the\n # full traceback.\n click.secho(f\"{traceback.format_exc()}\\n\", err=True, fg=\"red\")\n\n return sorted(rv)\n\n def main(self, *args, **kwargs):\n # Set a global flag that indicates that we were invoked from the\n # command line interface. This is detected by Flask.run to make the\n # call into a no-op. 
This is necessary to avoid ugly errors when the\n # script that is loaded here also attempts to start a server.\n os.environ[\"FLASK_RUN_FROM_CLI\"] = \"true\"\n\n if get_load_dotenv(self.load_dotenv):\n load_dotenv()\n\n obj = kwargs.get(\"obj\")\n\n if obj is None:\n obj = ScriptInfo(\n create_app=self.create_app, set_debug_flag=self.set_debug_flag\n )\n\n kwargs[\"obj\"] = obj\n kwargs.setdefault(\"auto_envvar_prefix\", \"FLASK\")\n return super().main(*args, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 538, "name": "_load_plugin_commands", "kind": "ref", "category": "function", "info": " self._load_plugin_commands()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 541, "name": "get_command", "kind": "ref", "category": "function", "info": " rv = super().get_command(ctx, name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 546, "name": "ensure_object", "kind": "ref", "category": "function", "info": " info = ctx.ensure_object(ScriptInfo)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 551, "name": "load_app", "kind": "ref", "category": "function", "info": " return info.load_app().cli.get_command(ctx, name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 551, "name": "get_command", "kind": "ref", "category": "function", "info": " return info.load_app().cli.get_command(ctx, name)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 553, "name": "format_message", "kind": "ref", "category": "function", "info": " click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 555, "name": "list_commands", "kind": "def", "category": "function", "info": " def list_commands(self, ctx):\n self._load_plugin_commands()\n # Start with the built-in and plugin commands.\n rv = set(super().list_commands(ctx))\n info = ctx.ensure_object(ScriptInfo)\n\n # Add commands provided by the app, showing an error and\n # continuing if the app couldn't be loaded.\n try:\n rv.update(info.load_app().cli.list_commands(ctx))\n except NoAppException as e:\n # When an app couldn't be loaded, show the error message\n # without the traceback.\n click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n except Exception:\n # When any other errors occurred during loading, show the\n # full traceback.\n click.secho(f\"{traceback.format_exc()}\\n\", err=True, fg=\"red\")\n\n return sorted(rv)\n\n def main(self, *args, **kwargs):\n # Set a global flag that indicates that we were invoked from the\n # command line interface. This is detected by Flask.run to make the\n # call into a no-op. 
This is necessary to avoid ugly errors when the\n # script that is loaded here also attempts to start a server.\n os.environ[\"FLASK_RUN_FROM_CLI\"] = \"true\"\n\n if get_load_dotenv(self.load_dotenv):\n load_dotenv()\n\n obj = kwargs.get(\"obj\")\n\n if obj is None:\n obj = ScriptInfo(\n create_app=self.create_app, set_debug_flag=self.set_debug_flag\n )\n\n kwargs[\"obj\"] = obj\n kwargs.setdefault(\"auto_envvar_prefix\", \"FLASK\")\n return super().main(*args, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 556, "name": "_load_plugin_commands", "kind": "ref", "category": "function", "info": " self._load_plugin_commands()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 558, "name": "list_commands", "kind": "ref", "category": "function", "info": " rv = set(super().list_commands(ctx))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 559, "name": "ensure_object", "kind": "ref", "category": "function", "info": " info = ctx.ensure_object(ScriptInfo)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 564, "name": "load_app", "kind": "ref", "category": "function", "info": " rv.update(info.load_app().cli.list_commands(ctx))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 564, "name": "list_commands", "kind": "ref", "category": "function", "info": " rv.update(info.load_app().cli.list_commands(ctx))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 568, "name": "format_message", "kind": "ref", "category": "function", "info": " click.secho(f\"Error: {e.format_message()}\\n\", err=True, fg=\"red\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 583, "name": "get_load_dotenv", "kind": "ref", "category": "function", "info": " if get_load_dotenv(self.load_dotenv):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 589, "name": "ScriptInfo", "kind": "ref", "category": "function", "info": " obj = ScriptInfo(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 598, "name": "_path_is_ancestor", "kind": "def", "category": "function", "info": "def _path_is_ancestor(path, other):\n \"\"\"Take ``other`` and remove the length of ``path`` from it. Then join it\n to ``path``. 
If it is the original value, ``path`` is an ancestor of\n ``other``.\"\"\"\n return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 628, "name": "isfile", "kind": "ref", "category": "function", "info": " if path or os.path.isfile(\".env\") or os.path.isfile(\".flaskenv\"):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 628, "name": "isfile", "kind": "ref", "category": "function", "info": " if path or os.path.isfile(\".env\") or os.path.isfile(\".flaskenv\"):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 641, "name": "isfile", "kind": "ref", "category": "function", "info": " if os.path.isfile(path):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 655, "name": "dirname", "kind": "ref", "category": "function", "info": " new_dir = os.path.dirname(path)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 662, "name": "show_server_banner", "kind": "def", "category": "function", "info": "def show_server_banner(env, debug, app_import_path, eager_loading):\n \"\"\"Show extra startup messages the first time the server is run,\n ignoring the reloader.\n \"\"\"\n if os.environ.get(\"WERKZEUG_RUN_MAIN\") == \"true\":\n return\n\n if app_import_path is not None:\n message = f\" * Serving Flask app {app_import_path!r}\"\n\n if not eager_loading:\n message += \" (lazy loading)\"\n\n click.echo(message)\n\n click.echo(f\" * Environment: {env}\")\n\n if env == \"production\":\n click.secho(\n \" WARNING: This is a development server. 
Do not use it in\"\n            \" a production deployment.\",\n            fg=\"red\",\n        )\n        click.secho(\" Use a production WSGI server instead.\", dim=True)\n\n    if debug is not None:\n        click.echo(f\" * Debug mode: {'on' if debug else 'off'}\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 691, "name": "CertParamType", "kind": "def", "category": "class", "info": "__init__\tconvert"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 702, "name": "convert", "kind": "def", "category": "function", "info": "    def convert(self, value, param, ctx):\n        if ssl is None:\n            raise click.BadParameter(\n                'Using \"--cert\" requires Python to be compiled with SSL support.',\n                ctx,\n                param,\n            )\n\n        try:\n            return self.path_type(value, param, ctx)\n        except click.BadParameter:\n            value = click.STRING(value, param, ctx).lower()\n\n            if value == \"adhoc\":\n                try:\n                    import cryptography  # noqa: F401\n                except ImportError:\n                    raise click.BadParameter(\n                        \"Using ad-hoc certificates requires the cryptography library.\",\n                        ctx,\n                        param,\n                    )\n\n                return value\n\n            obj = import_string(value, silent=True)\n\n            if isinstance(obj, ssl.SSLContext):\n                return obj\n\n            raise\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 711, "name": "path_type", "kind": "ref", "category": "function", "info": "            return self.path_type(value, param, ctx)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 735, "name": "_validate_key", "kind": "def", "category": "function", "info": "def _validate_key(ctx, param, value):\n    \"\"\"The ``--key`` option must be specified when ``--cert`` is a file.\n    Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.\n    \"\"\"\n    cert = ctx.params.get(\"cert\")\n    is_adhoc = cert == \"adhoc\"\n    is_context = ssl and isinstance(cert, ssl.SSLContext)\n\n    if value is not None:\n        if is_adhoc:\n            raise click.BadParameter(\n                'When \"--cert\" is \"adhoc\", \"--key\" is not used.', ctx, param\n            )\n\n        if is_context:\n            raise click.BadParameter(\n                'When \"--cert\" is an SSLContext object, \"--key\" is not used.', ctx, param\n            )\n\n        if not cert:\n            raise click.BadParameter('\"--cert\" must also be specified.', ctx, param)\n\n        ctx.params[\"cert\"] = cert, value\n\n    else:\n        if cert and not (is_adhoc or is_context):\n            raise click.BadParameter('Required when using \"--cert\".', ctx, param)\n\n    return value\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 766, "name": "SeparatedPathType", "kind": "def", "category": "class", "info": "convert"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 772, "name": "convert", "kind": "def", "category": "function", "info": "    def convert(self, value, param, ctx):\n        if ssl is None:\n            raise click.BadParameter(\n                'Using \"--cert\" requires Python to be compiled with SSL support.',\n                ctx,\n                param,\n            )\n\n        try:\n            return self.path_type(value, param, ctx)\n        except click.BadParameter:\n            value = click.STRING(value, param, ctx).lower()\n\n            if value == \"adhoc\":\n                try:\n                    import cryptography  # noqa: F401\n                except ImportError:\n                    raise click.BadParameter(\n                        \"Using ad-hoc certificates requires the cryptography library.\",\n                        ctx,\n                        param,\n                    )\n\n                return 
value\n\n obj = import_string(value, silent=True)\n\n if isinstance(obj, ssl.SSLContext):\n return obj\n\n raise\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 773, "name": "split_envvar_value", "kind": "ref", "category": "function", "info": " items = self.split_envvar_value(value)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 775, "name": "super_convert", "kind": "ref", "category": "function", "info": " return [super_convert(item, param, ctx) for item in items]\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 782, "name": "CertParamType", "kind": "ref", "category": "function", "info": " \"--cert\", type=CertParamType(), help=\"Specify a certificate file to use HTTPS.\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 817, "name": "SeparatedPathType", "kind": "ref", "category": "function", "info": " type=SeparatedPathType(),\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 824, "name": "run_command", "kind": "def", "category": "function", "info": "def run_command(\n info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 835, "name": "get_debug_flag", "kind": "ref", "category": "function", "info": " debug = get_debug_flag()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 843, "name": "show_server_banner", "kind": "ref", "category": "function", "info": " show_server_banner(get_env(), debug, info.app_import_path, eager_loading)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 843, "name": "get_env", "kind": "ref", "category": "function", "info": " show_server_banner(get_env(), debug, info.app_import_path, eager_loading)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 844, "name": "DispatchingApp", "kind": "ref", "category": "function", "info": " app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 862, "name": "shell_command", "kind": "def", "category": "function", "info": "def shell_command() -> None:\n \"\"\"Run an interactive Python shell in the context of a given\n Flask application. 
The application will populate the default\n namespace of this shell according to its configuration.\n\n This is useful for executing small snippets of management code\n without having to manually configure the application.\n \"\"\"\n import code\n from .globals import _app_ctx_stack\n\n app = _app_ctx_stack.top.app\n banner = (\n f\"Python {sys.version} on {sys.platform}\\n\"\n f\"App: {app.import_name} [{app.env}]\\n\"\n f\"Instance: {app.instance_path}\"\n )\n ctx: dict = {}\n\n # Support the regular Python interpreter startup script if someone\n # is using it.\n startup = os.environ.get(\"PYTHONSTARTUP\")\n if startup and os.path.isfile(startup):\n with open(startup) as f:\n eval(compile(f.read(), startup, \"exec\"), ctx)\n\n ctx.update(app.make_shell_context())\n\n # Site, customize, or startup script can set a hook to call when\n # entering interactive mode. The default one sets up readline with\n # tab and history completion.\n interactive_hook = getattr(sys, \"__interactivehook__\", None)\n\n if interactive_hook is not None:\n try:\n import readline\n from rlcompleter import Completer\n except ImportError:\n pass\n else:\n # rlcompleter uses __main__.__dict__ by default, which is\n # flask.__main__. Use the shell context instead.\n readline.set_completer(Completer(ctx).complete)\n\n interactive_hook()\n\n code.interact(banner=banner, local=ctx)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 884, "name": "isfile", "kind": "ref", "category": "function", "info": " if startup and os.path.isfile(startup):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 888, "name": "make_shell_context", "kind": "ref", "category": "function", "info": " ctx.update(app.make_shell_context())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 906, "name": "interactive_hook", "kind": "ref", "category": "function", "info": " interactive_hook()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 908, "name": "interact", "kind": "ref", "category": "function", "info": " code.interact(banner=banner, local=ctx)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 924, "name": "routes_command", "kind": "def", "category": "function", "info": "def routes_command(sort: str, all_methods: bool) -> None:\n \"\"\"Show all registered routes with endpoints and methods.\"\"\"\n\n rules = list(current_app.url_map.iter_rules())\n if not rules:\n click.echo(\"No routes were registered.\")\n return\n\n ignored_methods = set(() if all_methods else (\"HEAD\", \"OPTIONS\"))\n\n if sort in (\"endpoint\", \"rule\"):\n rules = sorted(rules, key=attrgetter(sort))\n elif sort == \"methods\":\n rules = sorted(rules, key=lambda rule: sorted(rule.methods)) # type: ignore\n\n rule_methods = [\n \", \".join(sorted(rule.methods - ignored_methods)) # type: ignore\n for rule in rules\n ]\n\n headers = (\"Endpoint\", \"Methods\", \"Rule\")\n widths = (\n max(len(rule.endpoint) for rule in rules),\n max(len(methods) for methods in rule_methods),\n max(len(rule.rule) for rule in rules),\n )\n widths = [max(len(h), w) for h, w in zip(headers, widths)]\n row = \"{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}\".format(*widths)\n\n 
click.echo(row.format(*headers).strip())\n click.echo(row.format(*(\"-\" * width for width in widths)))\n\n for rule, methods in zip(rules, rule_methods):\n click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 927, "name": "iter_rules", "kind": "ref", "category": "function", "info": " rules = list(current_app.url_map.iter_rules())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/cli.py", "rel_fname": "src/flask/cli.py", "line": 960, "name": "FlaskGroup", "kind": "ref", "category": "function", "info": "cli = FlaskGroup(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 8, "name": "ConfigAttribute", "kind": "def", "category": "class", "info": "__init__\t__get__\t__set__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 20, "name": "get_converter", "kind": "ref", "category": "function", "info": " rv = self.get_converter(rv)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 23, "name": "__set__", "kind": "def", "category": "function", "info": " def __set__(self, obj: t.Any, value: t.Any) -> None:\n obj.config[self.__name__] = value\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 27, "name": "Config", "kind": "def", "category": "class", "info": "__init__\tfrom_envvar\tfrom_pyfile\tfrom_object\tfrom_file\tfrom_mapping\tget_namespace\t__repr__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 75, "name": "from_envvar", "kind": "def", "category": "function", "info": " def from_envvar(self, variable_name: str, silent: bool = False) -> bool:\n \"\"\"Loads a configuration from an environment variable pointing to\n a configuration file. This is basically just a shortcut with nicer\n error messages for this line of code::\n\n app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])\n\n :param variable_name: name of the environment variable\n :param silent: set to ``True`` if you want silent failure for missing\n files.\n :return: bool. ``True`` if able to load config, ``False`` otherwise.\n \"\"\"\n rv = os.environ.get(variable_name)\n if not rv:\n if silent:\n return False\n raise RuntimeError(\n f\"The environment variable {variable_name!r} is not set\"\n \" and as such configuration could not be loaded. Set\"\n \" this variable and make it point to a configuration\"\n \" file\"\n )\n return self.from_pyfile(rv, silent=silent)\n\n def from_pyfile(self, filename: str, silent: bool = False) -> bool:\n \"\"\"Updates the values in the config from a Python file. This function\n behaves as if the file was imported as module with the\n :meth:`from_object` function.\n\n :param filename: the filename of the config. This can either be an\n absolute filename or a filename relative to the\n root path.\n :param silent: set to ``True`` if you want silent failure for missing\n files.\n\n .. 
versionadded:: 0.7\n `silent` parameter.\n \"\"\"\n filename = os.path.join(self.root_path, filename)\n d = types.ModuleType(\"config\")\n d.__file__ = filename\n try:\n with open(filename, mode=\"rb\") as config_file:\n exec(compile(config_file.read(), filename, \"exec\"), d.__dict__)\n except OSError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):\n return False\n e.strerror = f\"Unable to load configuration file ({e.strerror})\"\n raise\n self.from_object(d)\n return True\n\n def from_object(self, obj: t.Union[object, str]) -> None:\n \"\"\"Updates the values from the given object. An object can be of one\n of the following two types:\n\n - a string: in this case the object with that name will be imported\n - an actual object reference: that object is used directly\n\n Objects are usually either modules or classes. :meth:`from_object`\n loads only the uppercase attributes of the module/class. A ``dict``\n object will not work with :meth:`from_object` because the keys of a\n ``dict`` are not attributes of the ``dict`` class.\n\n Example of module-based configuration::\n\n app.config.from_object('yourapplication.default_config')\n from yourapplication import default_config\n app.config.from_object(default_config)\n\n Nothing is done to the object before loading. If the object is a\n class and has ``@property`` attributes, it needs to be\n instantiated before being passed to this method.\n\n You should not use this function to load the actual configuration but\n rather configuration defaults. The actual config should be loaded\n with :meth:`from_pyfile` and ideally from a location not within the\n package because the package might be installed system wide.\n\n See :ref:`config-dev-prod` for an example of class-based configuration\n using :meth:`from_object`.\n\n :param obj: an import name or object\n \"\"\"\n if isinstance(obj, str):\n obj = import_string(obj)\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n\n def from_file(\n self,\n filename: str,\n load: t.Callable[[t.IO[t.Any]], t.Mapping],\n silent: bool = False,\n ) -> bool:\n \"\"\"Update the values in the config from a file that is loaded\n using the ``load`` parameter. The loaded data is passed to the\n :meth:`from_mapping` method.\n\n .. code-block:: python\n\n import toml\n app.config.from_file(\"config.toml\", load=toml.load)\n\n :param filename: The path to the data file. This can be an\n absolute path or relative to the config root path.\n :param load: A callable that takes a file handle and returns a\n mapping of loaded data from the file.\n :type load: ``Callable[[Reader], Mapping]`` where ``Reader``\n implements a ``read`` method.\n :param silent: Ignore the file if it doesn't exist.\n\n .. versionadded:: 2.0\n \"\"\"\n filename = os.path.join(self.root_path, filename)\n\n try:\n with open(filename) as f:\n obj = load(f)\n except OSError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n\n e.strerror = f\"Unable to load configuration file ({e.strerror})\"\n raise\n\n return self.from_mapping(obj)\n\n def from_mapping(\n self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any\n ) -> bool:\n \"\"\"Updates the config like :meth:`update` ignoring items with non-upper\n keys.\n\n .. 
versionadded:: 0.11\n \"\"\"\n mappings: t.Dict[str, t.Any] = {}\n if mapping is not None:\n mappings.update(mapping)\n mappings.update(kwargs)\n for key, value in mappings.items():\n if key.isupper():\n self[key] = value\n return True\n\n def get_namespace(\n self, namespace: str, lowercase: bool = True, trim_namespace: bool = True\n ) -> t.Dict[str, t.Any]:\n \"\"\"Returns a dictionary containing a subset of configuration options\n that match the specified namespace/prefix. Example usage::\n\n app.config['IMAGE_STORE_TYPE'] = 'fs'\n app.config['IMAGE_STORE_PATH'] = '/var/app/images'\n app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'\n image_store_config = app.config.get_namespace('IMAGE_STORE_')\n\n The resulting dictionary `image_store_config` would look like::\n\n {\n 'type': 'fs',\n 'path': '/var/app/images',\n 'base_url': 'http://img.website.com'\n }\n\n This is often useful when configuration options map directly to\n keyword arguments in functions or class constructors.\n\n :param namespace: a configuration namespace\n :param lowercase: a flag indicating if the keys of the resulting\n dictionary should be lowercase\n :param trim_namespace: a flag indicating if the keys of the resulting\n dictionary should not include the namespace\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {}\n for k, v in self.items():\n if not k.startswith(namespace):\n continue\n if trim_namespace:\n key = k[len(namespace) :]\n else:\n key = k\n if lowercase:\n key = key.lower()\n rv[key] = v\n return rv\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {dict.__repr__(self)}>\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 97, "name": "from_pyfile", "kind": "ref", "category": "function", "info": " return self.from_pyfile(rv, silent=silent)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 99, "name": "from_pyfile", "kind": "def", "category": "function", "info": " def from_pyfile(self, filename: str, silent: bool = False) -> bool:\n \"\"\"Updates the values in the config from a Python file. This function\n behaves as if the file was imported as module with the\n :meth:`from_object` function.\n\n :param filename: the filename of the config. This can either be an\n absolute filename or a filename relative to the\n root path.\n :param silent: set to ``True`` if you want silent failure for missing\n files.\n\n .. versionadded:: 0.7\n `silent` parameter.\n \"\"\"\n filename = os.path.join(self.root_path, filename)\n d = types.ModuleType(\"config\")\n d.__file__ = filename\n try:\n with open(filename, mode=\"rb\") as config_file:\n exec(compile(config_file.read(), filename, \"exec\"), d.__dict__)\n except OSError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):\n return False\n e.strerror = f\"Unable to load configuration file ({e.strerror})\"\n raise\n self.from_object(d)\n return True\n\n def from_object(self, obj: t.Union[object, str]) -> None:\n \"\"\"Updates the values from the given object. An object can be of one\n of the following two types:\n\n - a string: in this case the object with that name will be imported\n - an actual object reference: that object is used directly\n\n Objects are usually either modules or classes. :meth:`from_object`\n loads only the uppercase attributes of the module/class. 
A ``dict``\n object will not work with :meth:`from_object` because the keys of a\n ``dict`` are not attributes of the ``dict`` class.\n\n Example of module-based configuration::\n\n app.config.from_object('yourapplication.default_config')\n from yourapplication import default_config\n app.config.from_object(default_config)\n\n Nothing is done to the object before loading. If the object is a\n class and has ``@property`` attributes, it needs to be\n instantiated before being passed to this method.\n\n You should not use this function to load the actual configuration but\n rather configuration defaults. The actual config should be loaded\n with :meth:`from_pyfile` and ideally from a location not within the\n package because the package might be installed system wide.\n\n See :ref:`config-dev-prod` for an example of class-based configuration\n using :meth:`from_object`.\n\n :param obj: an import name or object\n \"\"\"\n if isinstance(obj, str):\n obj = import_string(obj)\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n\n def from_file(\n self,\n filename: str,\n load: t.Callable[[t.IO[t.Any]], t.Mapping],\n silent: bool = False,\n ) -> bool:\n \"\"\"Update the values in the config from a file that is loaded\n using the ``load`` parameter. The loaded data is passed to the\n :meth:`from_mapping` method.\n\n .. code-block:: python\n\n import toml\n app.config.from_file(\"config.toml\", load=toml.load)\n\n :param filename: The path to the data file. This can be an\n absolute path or relative to the config root path.\n :param load: A callable that takes a file handle and returns a\n mapping of loaded data from the file.\n :type load: ``Callable[[Reader], Mapping]`` where ``Reader``\n implements a ``read`` method.\n :param silent: Ignore the file if it doesn't exist.\n\n .. versionadded:: 2.0\n \"\"\"\n filename = os.path.join(self.root_path, filename)\n\n try:\n with open(filename) as f:\n obj = load(f)\n except OSError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n\n e.strerror = f\"Unable to load configuration file ({e.strerror})\"\n raise\n\n return self.from_mapping(obj)\n\n def from_mapping(\n self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any\n ) -> bool:\n \"\"\"Updates the config like :meth:`update` ignoring items with non-upper\n keys.\n\n .. versionadded:: 0.11\n \"\"\"\n mappings: t.Dict[str, t.Any] = {}\n if mapping is not None:\n mappings.update(mapping)\n mappings.update(kwargs)\n for key, value in mappings.items():\n if key.isupper():\n self[key] = value\n return True\n\n def get_namespace(\n self, namespace: str, lowercase: bool = True, trim_namespace: bool = True\n ) -> t.Dict[str, t.Any]:\n \"\"\"Returns a dictionary containing a subset of configuration options\n that match the specified namespace/prefix. 
Example usage::\n\n app.config['IMAGE_STORE_TYPE'] = 'fs'\n app.config['IMAGE_STORE_PATH'] = '/var/app/images'\n app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'\n image_store_config = app.config.get_namespace('IMAGE_STORE_')\n\n The resulting dictionary `image_store_config` would look like::\n\n {\n 'type': 'fs',\n 'path': '/var/app/images',\n 'base_url': 'http://img.website.com'\n }\n\n This is often useful when configuration options map directly to\n keyword arguments in functions or class constructors.\n\n :param namespace: a configuration namespace\n :param lowercase: a flag indicating if the keys of the resulting\n dictionary should be lowercase\n :param trim_namespace: a flag indicating if the keys of the resulting\n dictionary should not include the namespace\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {}\n for k, v in self.items():\n if not k.startswith(namespace):\n continue\n if trim_namespace:\n key = k[len(namespace) :]\n else:\n key = k\n if lowercase:\n key = key.lower()\n rv[key] = v\n return rv\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {dict.__repr__(self)}>\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 124, "name": "from_object", "kind": "ref", "category": "function", "info": " self.from_object(d)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 127, "name": "from_object", "kind": "def", "category": "function", "info": " def from_object(self, obj: t.Union[object, str]) -> None:\n \"\"\"Updates the values from the given object. An object can be of one\n of the following two types:\n\n - a string: in this case the object with that name will be imported\n - an actual object reference: that object is used directly\n\n Objects are usually either modules or classes. :meth:`from_object`\n loads only the uppercase attributes of the module/class. A ``dict``\n object will not work with :meth:`from_object` because the keys of a\n ``dict`` are not attributes of the ``dict`` class.\n\n Example of module-based configuration::\n\n app.config.from_object('yourapplication.default_config')\n from yourapplication import default_config\n app.config.from_object(default_config)\n\n Nothing is done to the object before loading. If the object is a\n class and has ``@property`` attributes, it needs to be\n instantiated before being passed to this method.\n\n You should not use this function to load the actual configuration but\n rather configuration defaults. The actual config should be loaded\n with :meth:`from_pyfile` and ideally from a location not within the\n package because the package might be installed system wide.\n\n See :ref:`config-dev-prod` for an example of class-based configuration\n using :meth:`from_object`.\n\n :param obj: an import name or object\n \"\"\"\n if isinstance(obj, str):\n obj = import_string(obj)\n for key in dir(obj):\n if key.isupper():\n self[key] = getattr(obj, key)\n\n def from_file(\n self,\n filename: str,\n load: t.Callable[[t.IO[t.Any]], t.Mapping],\n silent: bool = False,\n ) -> bool:\n \"\"\"Update the values in the config from a file that is loaded\n using the ``load`` parameter. The loaded data is passed to the\n :meth:`from_mapping` method.\n\n .. code-block:: python\n\n import toml\n app.config.from_file(\"config.toml\", load=toml.load)\n\n :param filename: The path to the data file. 
This can be an\n absolute path or relative to the config root path.\n :param load: A callable that takes a file handle and returns a\n mapping of loaded data from the file.\n :type load: ``Callable[[Reader], Mapping]`` where ``Reader``\n implements a ``read`` method.\n :param silent: Ignore the file if it doesn't exist.\n\n .. versionadded:: 2.0\n \"\"\"\n filename = os.path.join(self.root_path, filename)\n\n try:\n with open(filename) as f:\n obj = load(f)\n except OSError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n\n e.strerror = f\"Unable to load configuration file ({e.strerror})\"\n raise\n\n return self.from_mapping(obj)\n\n def from_mapping(\n self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any\n ) -> bool:\n \"\"\"Updates the config like :meth:`update` ignoring items with non-upper\n keys.\n\n .. versionadded:: 0.11\n \"\"\"\n mappings: t.Dict[str, t.Any] = {}\n if mapping is not None:\n mappings.update(mapping)\n mappings.update(kwargs)\n for key, value in mappings.items():\n if key.isupper():\n self[key] = value\n return True\n\n def get_namespace(\n self, namespace: str, lowercase: bool = True, trim_namespace: bool = True\n ) -> t.Dict[str, t.Any]:\n \"\"\"Returns a dictionary containing a subset of configuration options\n that match the specified namespace/prefix. Example usage::\n\n app.config['IMAGE_STORE_TYPE'] = 'fs'\n app.config['IMAGE_STORE_PATH'] = '/var/app/images'\n app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'\n image_store_config = app.config.get_namespace('IMAGE_STORE_')\n\n The resulting dictionary `image_store_config` would look like::\n\n {\n 'type': 'fs',\n 'path': '/var/app/images',\n 'base_url': 'http://img.website.com'\n }\n\n This is often useful when configuration options map directly to\n keyword arguments in functions or class constructors.\n\n :param namespace: a configuration namespace\n :param lowercase: a flag indicating if the keys of the resulting\n dictionary should be lowercase\n :param trim_namespace: a flag indicating if the keys of the resulting\n dictionary should not include the namespace\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {}\n for k, v in self.items():\n if not k.startswith(namespace):\n continue\n if trim_namespace:\n key = k[len(namespace) :]\n else:\n key = k\n if lowercase:\n key = key.lower()\n rv[key] = v\n return rv\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {dict.__repr__(self)}>\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 165, "name": "from_file", "kind": "def", "category": "function", "info": " def from_file(\n self,\n filename: str,\n load: t.Callable[[t.IO[t.Any]], t.Mapping],\n silent: bool = False,\n ) -> bool:\n \"\"\"Update the values in the config from a file that is loaded\n using the ``load`` parameter. The loaded data is passed to the\n :meth:`from_mapping` method.\n\n .. code-block:: python\n\n import toml\n app.config.from_file(\"config.toml\", load=toml.load)\n\n :param filename: The path to the data file. This can be an\n absolute path or relative to the config root path.\n :param load: A callable that takes a file handle and returns a\n mapping of loaded data from the file.\n :type load: ``Callable[[Reader], Mapping]`` where ``Reader``\n implements a ``read`` method.\n :param silent: Ignore the file if it doesn't exist.\n\n .. 
versionadded:: 2.0\n \"\"\"\n filename = os.path.join(self.root_path, filename)\n\n try:\n with open(filename) as f:\n obj = load(f)\n except OSError as e:\n if silent and e.errno in (errno.ENOENT, errno.EISDIR):\n return False\n\n e.strerror = f\"Unable to load configuration file ({e.strerror})\"\n raise\n\n return self.from_mapping(obj)\n\n def from_mapping(\n self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any\n ) -> bool:\n \"\"\"Updates the config like :meth:`update` ignoring items with non-upper\n keys.\n\n .. versionadded:: 0.11\n \"\"\"\n mappings: t.Dict[str, t.Any] = {}\n if mapping is not None:\n mappings.update(mapping)\n mappings.update(kwargs)\n for key, value in mappings.items():\n if key.isupper():\n self[key] = value\n return True\n\n def get_namespace(\n self, namespace: str, lowercase: bool = True, trim_namespace: bool = True\n ) -> t.Dict[str, t.Any]:\n \"\"\"Returns a dictionary containing a subset of configuration options\n that match the specified namespace/prefix. Example usage::\n\n app.config['IMAGE_STORE_TYPE'] = 'fs'\n app.config['IMAGE_STORE_PATH'] = '/var/app/images'\n app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'\n image_store_config = app.config.get_namespace('IMAGE_STORE_')\n\n The resulting dictionary `image_store_config` would look like::\n\n {\n 'type': 'fs',\n 'path': '/var/app/images',\n 'base_url': 'http://img.website.com'\n }\n\n This is often useful when configuration options map directly to\n keyword arguments in functions or class constructors.\n\n :param namespace: a configuration namespace\n :param lowercase: a flag indicating if the keys of the resulting\n dictionary should be lowercase\n :param trim_namespace: a flag indicating if the keys of the resulting\n dictionary should not include the namespace\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {}\n for k, v in self.items():\n if not k.startswith(namespace):\n continue\n if trim_namespace:\n key = k[len(namespace) :]\n else:\n key = k\n if lowercase:\n key = key.lower()\n rv[key] = v\n return rv\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {dict.__repr__(self)}>\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 194, "name": "load", "kind": "ref", "category": "function", "info": " obj = load(f)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 202, "name": "from_mapping", "kind": "ref", "category": "function", "info": " return self.from_mapping(obj)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 204, "name": "from_mapping", "kind": "def", "category": "function", "info": " def from_mapping(\n self, mapping: t.Optional[t.Mapping[str, t.Any]] = None, **kwargs: t.Any\n ) -> bool:\n \"\"\"Updates the config like :meth:`update` ignoring items with non-upper\n keys.\n\n .. versionadded:: 0.11\n \"\"\"\n mappings: t.Dict[str, t.Any] = {}\n if mapping is not None:\n mappings.update(mapping)\n mappings.update(kwargs)\n for key, value in mappings.items():\n if key.isupper():\n self[key] = value\n return True\n\n def get_namespace(\n self, namespace: str, lowercase: bool = True, trim_namespace: bool = True\n ) -> t.Dict[str, t.Any]:\n \"\"\"Returns a dictionary containing a subset of configuration options\n that match the specified namespace/prefix. 
Example usage::\n\n app.config['IMAGE_STORE_TYPE'] = 'fs'\n app.config['IMAGE_STORE_PATH'] = '/var/app/images'\n app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'\n image_store_config = app.config.get_namespace('IMAGE_STORE_')\n\n The resulting dictionary `image_store_config` would look like::\n\n {\n 'type': 'fs',\n 'path': '/var/app/images',\n 'base_url': 'http://img.website.com'\n }\n\n This is often useful when configuration options map directly to\n keyword arguments in functions or class constructors.\n\n :param namespace: a configuration namespace\n :param lowercase: a flag indicating if the keys of the resulting\n dictionary should be lowercase\n :param trim_namespace: a flag indicating if the keys of the resulting\n dictionary should not include the namespace\n\n .. versionadded:: 0.11\n \"\"\"\n rv = {}\n for k, v in self.items():\n if not k.startswith(namespace):\n continue\n if trim_namespace:\n key = k[len(namespace) :]\n else:\n key = k\n if lowercase:\n key = key.lower()\n rv[key] = v\n return rv\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {dict.__repr__(self)}>\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/config.py", "rel_fname": "src/flask/config.py", "line": 221, "name": "get_namespace", "kind": "def", "category": "function", "info": " def get_namespace(\n self, namespace: str, lowercase: bool = True, trim_namespace: bool = True\n ) -> t.Dict[str, t.Any]:\n \"\"\"Returns a dictionary containing a subset of configuration options\n that match the specified namespace/prefix. Example usage::\n\n app.config['IMAGE_STORE_TYPE'] = 'fs'\n app.config['IMAGE_STORE_PATH'] = '/var/app/images'\n app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'\n image_store_config = app.config.get_namespace('IMAGE_STORE_')\n\n The resulting dictionary `image_store_config` would look like::\n\n {\n 'type': 'fs',\n 'path': '/var/app/images',\n 'base_url': 'http://img.website.com'\n }\n\n This is often useful when configuration options map directly to\n keyword arguments in functions or class constructors.\n\n :param namespace: a configuration namespace\n :param lowercase: a flag indicating if the keys of the resulting\n dictionary should be lowercase\n :param trim_namespace: a flag indicating if the keys of the resulting\n dictionary should not include the namespace\n\n .. 
versionadded:: 0.11\n \"\"\"\n rv = {}\n for k, v in self.items():\n if not k.startswith(namespace):\n continue\n if trim_namespace:\n key = k[len(namespace) :]\n else:\n key = k\n if lowercase:\n key = key.lower()\n rv[key] = v\n return rv\n\n def __repr__(self) -> str:\n return f\"<{type(self).__name__} {dict.__repr__(self)}>\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 23, "name": "_AppCtxGlobals", "kind": "def", "category": "class", "info": "__getattr__\t__setattr__\t__delattr__\tget\tpop\tsetdefault\t__contains__\t__iter__\t__repr__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 46, "name": "__getattr__", "kind": "def", "category": "function", "info": " def __getattr__(self, name: str) -> t.Any:\n try:\n return self.__dict__[name]\n except KeyError:\n raise AttributeError(name) from None\n\n def __setattr__(self, name: str, value: t.Any) -> None:\n self.__dict__[name] = value\n\n def __delattr__(self, name: str) -> None:\n try:\n del self.__dict__[name]\n except KeyError:\n raise AttributeError(name) from None\n\n def get(self, name: str, default: t.Optional[t.Any] = None) -> t.Any:\n \"\"\"Get an attribute by name, or a default value. Like\n :meth:`dict.get`.\n\n :param name: Name of attribute to get.\n :param default: Value to return if the attribute is not present.\n\n .. versionadded:: 0.10\n \"\"\"\n return self.__dict__.get(name, default)\n\n def pop(self, name: str, default: t.Any = _sentinel) -> t.Any:\n \"\"\"Get and remove an attribute by name. Like :meth:`dict.pop`.\n\n :param name: Name of attribute to pop.\n :param default: Value to return if the attribute is not present,\n instead of raising a ``KeyError``.\n\n .. versionadded:: 0.11\n \"\"\"\n if default is _sentinel:\n return self.__dict__.pop(name)\n else:\n return self.__dict__.pop(name, default)\n\n def setdefault(self, name: str, default: t.Any = None) -> t.Any:\n \"\"\"Get the value of an attribute if it is present, otherwise\n set and return a default value. Like :meth:`dict.setdefault`.\n\n :param name: Name of attribute to get.\n :param default: Value to set and return if the attribute is not\n present.\n\n .. versionadded:: 0.11\n \"\"\"\n return self.__dict__.setdefault(name, default)\n\n def __contains__(self, item: str) -> bool:\n return item in self.__dict__\n\n def __iter__(self) -> t.Iterator[str]:\n return iter(self.__dict__)\n\n def __repr__(self) -> str:\n top = _app_ctx_stack.top\n if top is not None:\n return f\"\"\n return object.__repr__(self)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 111, "name": "after_this_request", "kind": "def", "category": "function", "info": "def after_this_request(f: AfterRequestCallable) -> AfterRequestCallable:\n \"\"\"Executes a function after this request. This is useful to modify\n response objects. The function is passed the response object and has\n to return the same or a new one.\n\n Example::\n\n @app.route('/')\n def index():\n @after_this_request\n def add_header(response):\n response.headers['X-Foo'] = 'Parachute'\n return response\n return 'Hello World!'\n\n This is more useful if a function other than the view function wants to\n modify a response. For instance think of a decorator that wants to add\n some headers without converting the return value into a response object.\n\n .. 
versionadded:: 0.9\n \"\"\"\n _request_ctx_stack.top._after_request_functions.append(f)\n return f\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 136, "name": "copy_current_request_context", "kind": "def", "category": "function", "info": "def copy_current_request_context(f: t.Callable) -> t.Callable:\n \"\"\"A helper function that decorates a function to retain the current\n request context. This is useful when working with greenlets. The moment\n the function is decorated a copy of the request context is created and\n then pushed when the function is called. The current session is also\n included in the copied request context.\n\n Example::\n\n import gevent\n from flask import copy_current_request_context\n\n @app.route('/')\n def index():\n @copy_current_request_context\n def do_some_work():\n # do some work here, it can access flask.request or\n # flask.session like you would otherwise in the view function.\n ...\n gevent.spawn(do_some_work)\n return 'Regular response'\n\n .. versionadded:: 0.10\n \"\"\"\n top = _request_ctx_stack.top\n if top is None:\n raise RuntimeError(\n \"This decorator can only be used at local scopes \"\n \"when a request context is on the stack. For instance within \"\n \"view functions.\"\n )\n reqctx = top.copy()\n\n def wrapper(*args, **kwargs):\n with reqctx:\n return f(*args, **kwargs)\n\n return update_wrapper(wrapper, f)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 169, "name": "wrapper", "kind": "def", "category": "function", "info": " def wrapper(*args, **kwargs):\n with reqctx:\n return f(*args, **kwargs)\n\n return update_wrapper(wrapper, f)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 171, "name": "f", "kind": "ref", "category": "function", "info": " return f(*args, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 176, "name": "has_request_context", "kind": "def", "category": "function", "info": "def has_request_context() -> bool:\n \"\"\"If you have code that wants to test if a request context is there or\n not this function can be used. For instance, you may want to take advantage\n of request information if the request object is available, but fail\n silently if it is unavailable.\n\n ::\n\n class User(db.Model):\n\n def __init__(self, username, remote_addr=None):\n self.username = username\n if remote_addr is None and has_request_context():\n remote_addr = request.remote_addr\n self.remote_addr = remote_addr\n\n Alternatively you can also just test any of the context bound objects\n (such as :class:`request` or :class:`g`) for truthness::\n\n class User(db.Model):\n\n def __init__(self, username, remote_addr=None):\n self.username = username\n if remote_addr is None and request:\n remote_addr = request.remote_addr\n self.remote_addr = remote_addr\n\n .. versionadded:: 0.7\n \"\"\"\n return _request_ctx_stack.top is not None\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 208, "name": "has_app_context", "kind": "def", "category": "function", "info": "def has_app_context() -> bool:\n \"\"\"Works like :func:`has_request_context` but for the application\n context. 
You can also just do a boolean check on the\n :data:`current_app` object instead.\n\n .. versionadded:: 0.9\n \"\"\"\n return _app_ctx_stack.top is not None\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 218, "name": "AppContext", "kind": "def", "category": "class", "info": "__init__\tpush\tpop\t__enter__\t__exit__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 229, "name": "create_url_adapter", "kind": "ref", "category": "function", "info": " self.url_adapter = app.create_url_adapter(None)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 230, "name": "app_ctx_globals_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 236, "name": "push", "kind": "def", "category": "function", "info": " def push(self) -> None:\n \"\"\"Binds the app context to the current context.\"\"\"\n self._refcnt += 1\n _app_ctx_stack.push(self)\n appcontext_pushed.send(self.app)\n\n def pop(self, exc: t.Optional[BaseException] = _sentinel) -> None: # type: ignore\n \"\"\"Pops the app context.\"\"\"\n try:\n self._refcnt -= 1\n if self._refcnt <= 0:\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n self.app.do_teardown_appcontext(exc)\n finally:\n rv = _app_ctx_stack.pop()\n assert rv is self, f\"Popped wrong app context. ({rv!r} instead of {self!r})\"\n appcontext_popped.send(self.app)\n\n def __enter__(self) -> \"AppContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n self.pop(exc_value)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 239, "name": "push", "kind": "ref", "category": "function", "info": " _app_ctx_stack.push(self)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 240, "name": "send", "kind": "ref", "category": "function", "info": " appcontext_pushed.send(self.app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 249, "name": "do_teardown_appcontext", "kind": "ref", "category": "function", "info": " self.app.do_teardown_appcontext(exc)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 253, "name": "send", "kind": "ref", "category": "function", "info": " appcontext_popped.send(self.app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 255, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self) -> \"AppContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n self.pop(exc_value)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 256, "name": "push", "kind": "ref", "category": "function", "info": " self.push()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 259, "name": 
"__exit__", "kind": "def", "category": "function", "info": " def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n self.pop(exc_value)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 265, "name": "RequestContext", "kind": "def", "category": "class", "info": "__init__\tg\tg\tcopy\tmatch_request\tpush\tpop\tauto_pop\t__enter__\t__exit__\t__repr__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 304, "name": "request_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 308, "name": "create_url_adapter", "kind": "ref", "category": "function", "info": " self.url_adapter = app.create_url_adapter(self.request)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 334, "name": "g", "kind": "def", "category": "function", "info": " def g(self) -> AppContext:\n return _app_ctx_stack.top.g\n\n @g.setter\n def g(self, value: AppContext) -> None:\n _app_ctx_stack.top.g = value\n\n def copy(self) -> \"RequestContext\":\n \"\"\"Creates a copy of this request context with the same request object.\n This can be used to move a request context to a different greenlet.\n Because the actual request object is the same this cannot be used to\n move a request context to a different thread unless access to the\n request object is locked.\n\n .. versionadded:: 0.10\n\n .. versionchanged:: 1.1\n The current session object is used instead of reloading the original\n data. This prevents `flask.session` pointing to an out-of-date object.\n \"\"\"\n return self.__class__(\n self.app,\n environ=self.request.environ,\n request=self.request,\n session=self.session,\n )\n\n def match_request(self) -> None:\n \"\"\"Can be overridden by a subclass to hook into the matching\n of the request.\n \"\"\"\n try:\n result = self.url_adapter.match(return_rule=True) # type: ignore\n self.request.url_rule, self.request.view_args = result # type: ignore\n except HTTPException as e:\n self.request.routing_exception = e\n\n def push(self) -> None:\n \"\"\"Binds the request context to the current context.\"\"\"\n # If an exception occurs in debug mode or if context preservation is\n # activated under exception situations exactly one context stays\n # on the stack. The rationale is that you want to access that\n # information under debug situations. However if someone forgets to\n # pop that context again we want to make sure that on the next push\n # it's invalidated, otherwise we run at risk that something leaks\n # memory. 
This is usually only a problem in test suite since this\n # functionality is not active in production environments.\n top = _request_ctx_stack.top\n if top is not None and top.preserved:\n top.pop(top._preserved_exc)\n\n # Before we push the request context we have to ensure that there\n # is an application context.\n app_ctx = _app_ctx_stack.top\n if app_ctx is None or app_ctx.app != self.app:\n app_ctx = self.app.app_context()\n app_ctx.push()\n self._implicit_app_ctx_stack.append(app_ctx)\n else:\n self._implicit_app_ctx_stack.append(None)\n\n _request_ctx_stack.push(self)\n\n if self.url_adapter is not None:\n self.match_request()\n\n # Open the session at the moment that the request context is available.\n # This allows a custom open_session method to use the request context.\n # Only open a new session if this is the first time the request was\n # pushed, otherwise stream_with_context loses the session.\n if self.session is None:\n session_interface = self.app.session_interface\n self.session = session_interface.open_session(self.app, self.request)\n\n if self.session is None:\n self.session = session_interface.make_null_session(self.app)\n\n def pop(self, exc: t.Optional[BaseException] = _sentinel) -> None: # type: ignore\n \"\"\"Pops the request context and unbinds it by doing that. This will\n also trigger the execution of functions registered by the\n :meth:`~flask.Flask.teardown_request` decorator.\n\n .. versionchanged:: 0.9\n Added the `exc` argument.\n \"\"\"\n app_ctx = self._implicit_app_ctx_stack.pop()\n clear_request = False\n\n try:\n if not self._implicit_app_ctx_stack:\n self.preserved = False\n self._preserved_exc = None\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n self.app.do_teardown_request(exc)\n\n request_close = getattr(self.request, \"close\", None)\n if request_close is not None:\n request_close()\n clear_request = True\n finally:\n rv = _request_ctx_stack.pop()\n\n # get rid of circular dependencies at the end of the request\n # so that we don't require the GC to be active.\n if clear_request:\n rv.request.environ[\"werkzeug.request\"] = None\n\n # Get rid of the app as well if necessary.\n if app_ctx is not None:\n app_ctx.pop(exc)\n\n assert (\n rv is self\n ), f\"Popped wrong request context. ({rv!r} instead of {self!r})\"\n\n def auto_pop(self, exc: t.Optional[BaseException]) -> None:\n if self.request.environ.get(\"flask._preserve_context\") or (\n exc is not None and self.app.preserve_context_on_exception\n ):\n self.preserved = True\n self._preserved_exc = exc # type: ignore\n else:\n self.pop(exc)\n\n def __enter__(self) -> \"RequestContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n # do not pop the request stack if we are in debug mode and an\n # exception happened. This will allow the debugger to still\n # access the request object in the interactive shell. 
Furthermore\n # the context can be force kept alive for the test client.\n # See flask.testing for how this works.\n self.auto_pop(exc_value)\n\n def __repr__(self) -> str:\n return (\n f\"<{type(self).__name__} {self.request.url!r}\"\n f\" [{self.request.method}] of {self.app.name}>\"\n )\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 338, "name": "g", "kind": "def", "category": "function", "info": " def g(self, value: AppContext) -> None:\n _app_ctx_stack.top.g = value\n\n def copy(self) -> \"RequestContext\":\n \"\"\"Creates a copy of this request context with the same request object.\n This can be used to move a request context to a different greenlet.\n Because the actual request object is the same this cannot be used to\n move a request context to a different thread unless access to the\n request object is locked.\n\n .. versionadded:: 0.10\n\n .. versionchanged:: 1.1\n The current session object is used instead of reloading the original\n data. This prevents `flask.session` pointing to an out-of-date object.\n \"\"\"\n return self.__class__(\n self.app,\n environ=self.request.environ,\n request=self.request,\n session=self.session,\n )\n\n def match_request(self) -> None:\n \"\"\"Can be overridden by a subclass to hook into the matching\n of the request.\n \"\"\"\n try:\n result = self.url_adapter.match(return_rule=True) # type: ignore\n self.request.url_rule, self.request.view_args = result # type: ignore\n except HTTPException as e:\n self.request.routing_exception = e\n\n def push(self) -> None:\n \"\"\"Binds the request context to the current context.\"\"\"\n # If an exception occurs in debug mode or if context preservation is\n # activated under exception situations exactly one context stays\n # on the stack. The rationale is that you want to access that\n # information under debug situations. However if someone forgets to\n # pop that context again we want to make sure that on the next push\n # it's invalidated, otherwise we run at risk that something leaks\n # memory. This is usually only a problem in test suite since this\n # functionality is not active in production environments.\n top = _request_ctx_stack.top\n if top is not None and top.preserved:\n top.pop(top._preserved_exc)\n\n # Before we push the request context we have to ensure that there\n # is an application context.\n app_ctx = _app_ctx_stack.top\n if app_ctx is None or app_ctx.app != self.app:\n app_ctx = self.app.app_context()\n app_ctx.push()\n self._implicit_app_ctx_stack.append(app_ctx)\n else:\n self._implicit_app_ctx_stack.append(None)\n\n _request_ctx_stack.push(self)\n\n if self.url_adapter is not None:\n self.match_request()\n\n # Open the session at the moment that the request context is available.\n # This allows a custom open_session method to use the request context.\n # Only open a new session if this is the first time the request was\n # pushed, otherwise stream_with_context loses the session.\n if self.session is None:\n session_interface = self.app.session_interface\n self.session = session_interface.open_session(self.app, self.request)\n\n if self.session is None:\n self.session = session_interface.make_null_session(self.app)\n\n def pop(self, exc: t.Optional[BaseException] = _sentinel) -> None: # type: ignore\n \"\"\"Pops the request context and unbinds it by doing that. This will\n also trigger the execution of functions registered by the\n :meth:`~flask.Flask.teardown_request` decorator.\n\n .. 
versionchanged:: 0.9\n Added the `exc` argument.\n \"\"\"\n app_ctx = self._implicit_app_ctx_stack.pop()\n clear_request = False\n\n try:\n if not self._implicit_app_ctx_stack:\n self.preserved = False\n self._preserved_exc = None\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n self.app.do_teardown_request(exc)\n\n request_close = getattr(self.request, \"close\", None)\n if request_close is not None:\n request_close()\n clear_request = True\n finally:\n rv = _request_ctx_stack.pop()\n\n # get rid of circular dependencies at the end of the request\n # so that we don't require the GC to be active.\n if clear_request:\n rv.request.environ[\"werkzeug.request\"] = None\n\n # Get rid of the app as well if necessary.\n if app_ctx is not None:\n app_ctx.pop(exc)\n\n assert (\n rv is self\n ), f\"Popped wrong request context. ({rv!r} instead of {self!r})\"\n\n def auto_pop(self, exc: t.Optional[BaseException]) -> None:\n if self.request.environ.get(\"flask._preserve_context\") or (\n exc is not None and self.app.preserve_context_on_exception\n ):\n self.preserved = True\n self._preserved_exc = exc # type: ignore\n else:\n self.pop(exc)\n\n def __enter__(self) -> \"RequestContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n # do not pop the request stack if we are in debug mode and an\n # exception happened. This will allow the debugger to still\n # access the request object in the interactive shell. Furthermore\n # the context can be force kept alive for the test client.\n # See flask.testing for how this works.\n self.auto_pop(exc_value)\n\n def __repr__(self) -> str:\n return (\n f\"<{type(self).__name__} {self.request.url!r}\"\n f\" [{self.request.method}] of {self.app.name}>\"\n )\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 361, "name": "match_request", "kind": "def", "category": "function", "info": " def match_request(self) -> None:\n \"\"\"Can be overridden by a subclass to hook into the matching\n of the request.\n \"\"\"\n try:\n result = self.url_adapter.match(return_rule=True) # type: ignore\n self.request.url_rule, self.request.view_args = result # type: ignore\n except HTTPException as e:\n self.request.routing_exception = e\n\n def push(self) -> None:\n \"\"\"Binds the request context to the current context.\"\"\"\n # If an exception occurs in debug mode or if context preservation is\n # activated under exception situations exactly one context stays\n # on the stack. The rationale is that you want to access that\n # information under debug situations. However if someone forgets to\n # pop that context again we want to make sure that on the next push\n # it's invalidated, otherwise we run at risk that something leaks\n # memory. 
This is usually only a problem in test suite since this\n # functionality is not active in production environments.\n top = _request_ctx_stack.top\n if top is not None and top.preserved:\n top.pop(top._preserved_exc)\n\n # Before we push the request context we have to ensure that there\n # is an application context.\n app_ctx = _app_ctx_stack.top\n if app_ctx is None or app_ctx.app != self.app:\n app_ctx = self.app.app_context()\n app_ctx.push()\n self._implicit_app_ctx_stack.append(app_ctx)\n else:\n self._implicit_app_ctx_stack.append(None)\n\n _request_ctx_stack.push(self)\n\n if self.url_adapter is not None:\n self.match_request()\n\n # Open the session at the moment that the request context is available.\n # This allows a custom open_session method to use the request context.\n # Only open a new session if this is the first time the request was\n # pushed, otherwise stream_with_context loses the session.\n if self.session is None:\n session_interface = self.app.session_interface\n self.session = session_interface.open_session(self.app, self.request)\n\n if self.session is None:\n self.session = session_interface.make_null_session(self.app)\n\n def pop(self, exc: t.Optional[BaseException] = _sentinel) -> None: # type: ignore\n \"\"\"Pops the request context and unbinds it by doing that. This will\n also trigger the execution of functions registered by the\n :meth:`~flask.Flask.teardown_request` decorator.\n\n .. versionchanged:: 0.9\n Added the `exc` argument.\n \"\"\"\n app_ctx = self._implicit_app_ctx_stack.pop()\n clear_request = False\n\n try:\n if not self._implicit_app_ctx_stack:\n self.preserved = False\n self._preserved_exc = None\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n self.app.do_teardown_request(exc)\n\n request_close = getattr(self.request, \"close\", None)\n if request_close is not None:\n request_close()\n clear_request = True\n finally:\n rv = _request_ctx_stack.pop()\n\n # get rid of circular dependencies at the end of the request\n # so that we don't require the GC to be active.\n if clear_request:\n rv.request.environ[\"werkzeug.request\"] = None\n\n # Get rid of the app as well if necessary.\n if app_ctx is not None:\n app_ctx.pop(exc)\n\n assert (\n rv is self\n ), f\"Popped wrong request context. ({rv!r} instead of {self!r})\"\n\n def auto_pop(self, exc: t.Optional[BaseException]) -> None:\n if self.request.environ.get(\"flask._preserve_context\") or (\n exc is not None and self.app.preserve_context_on_exception\n ):\n self.preserved = True\n self._preserved_exc = exc # type: ignore\n else:\n self.pop(exc)\n\n def __enter__(self) -> \"RequestContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n # do not pop the request stack if we are in debug mode and an\n # exception happened. This will allow the debugger to still\n # access the request object in the interactive shell. 
Furthermore\n # the context can be force kept alive for the test client.\n # See flask.testing for how this works.\n self.auto_pop(exc_value)\n\n def __repr__(self) -> str:\n return (\n f\"<{type(self).__name__} {self.request.url!r}\"\n f\" [{self.request.method}] of {self.app.name}>\"\n )\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 366, "name": "match", "kind": "ref", "category": "function", "info": " result = self.url_adapter.match(return_rule=True) # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 371, "name": "push", "kind": "def", "category": "function", "info": " def push(self) -> None:\n \"\"\"Binds the app context to the current context.\"\"\"\n self._refcnt += 1\n _app_ctx_stack.push(self)\n appcontext_pushed.send(self.app)\n\n def pop(self, exc: t.Optional[BaseException] = _sentinel) -> None: # type: ignore\n \"\"\"Pops the app context.\"\"\"\n try:\n self._refcnt -= 1\n if self._refcnt <= 0:\n if exc is _sentinel:\n exc = sys.exc_info()[1]\n self.app.do_teardown_appcontext(exc)\n finally:\n rv = _app_ctx_stack.pop()\n assert rv is self, f\"Popped wrong app context. ({rv!r} instead of {self!r})\"\n appcontext_popped.send(self.app)\n\n def __enter__(self) -> \"AppContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n self.pop(exc_value)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 389, "name": "app_context", "kind": "ref", "category": "function", "info": " app_ctx = self.app.app_context()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 390, "name": "push", "kind": "ref", "category": "function", "info": " app_ctx.push()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 395, "name": "push", "kind": "ref", "category": "function", "info": " _request_ctx_stack.push(self)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 398, "name": "match_request", "kind": "ref", "category": "function", "info": " self.match_request()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 406, "name": "open_session", "kind": "ref", "category": "function", "info": " self.session = session_interface.open_session(self.app, self.request)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 409, "name": "make_null_session", "kind": "ref", "category": "function", "info": " self.session = session_interface.make_null_session(self.app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 428, "name": "do_teardown_request", "kind": "ref", "category": "function", "info": " self.app.do_teardown_request(exc)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 432, "name": "request_close", "kind": "ref", "category": "function", "info": " request_close()\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 450, "name": "auto_pop", "kind": "def", "category": "function", "info": " def auto_pop(self, exc: t.Optional[BaseException]) -> None:\n if self.request.environ.get(\"flask._preserve_context\") or (\n exc is not None and self.app.preserve_context_on_exception\n ):\n self.preserved = True\n self._preserved_exc = exc # type: ignore\n else:\n self.pop(exc)\n\n def __enter__(self) -> \"RequestContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n # do not pop the request stack if we are in debug mode and an\n # exception happened. This will allow the debugger to still\n # access the request object in the interactive shell. Furthermore\n # the context can be force kept alive for the test client.\n # See flask.testing for how this works.\n self.auto_pop(exc_value)\n\n def __repr__(self) -> str:\n return (\n f\"<{type(self).__name__} {self.request.url!r}\"\n f\" [{self.request.method}] of {self.app.name}>\"\n )\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 459, "name": "__enter__", "kind": "def", "category": "function", "info": " def __enter__(self) -> \"RequestContext\":\n self.push()\n return self\n\n def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n # do not pop the request stack if we are in debug mode and an\n # exception happened. This will allow the debugger to still\n # access the request object in the interactive shell. Furthermore\n # the context can be force kept alive for the test client.\n # See flask.testing for how this works.\n self.auto_pop(exc_value)\n\n def __repr__(self) -> str:\n return (\n f\"<{type(self).__name__} {self.request.url!r}\"\n f\" [{self.request.method}] of {self.app.name}>\"\n )\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 460, "name": "push", "kind": "ref", "category": "function", "info": " self.push()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 463, "name": "__exit__", "kind": "def", "category": "function", "info": " def __exit__(\n self, exc_type: type, exc_value: BaseException, tb: TracebackType\n ) -> None:\n self.pop(exc_value)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/ctx.py", "rel_fname": "src/flask/ctx.py", "line": 471, "name": "auto_pop", "kind": "ref", "category": "function", "info": " self.auto_pop(exc_value)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 9, "name": "UnexpectedUnicodeError", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 15, "name": "DebugFilesKeyError", "kind": "def", "category": "class", "info": "__init__\t__str__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 21, "name": "getlist", "kind": "ref", "category": "function", "info": " form_matches = request.form.getlist(key)\n"}, {"fname": 
"playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 42, "name": "FormDataRoutingRedirect", "kind": "def", "category": "class", "info": "__init__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 74, "name": "attach_enctype_error_multidict", "kind": "def", "category": "function", "info": "def attach_enctype_error_multidict(request):\n \"\"\"Since Flask 0.8 we're monkeypatching the files object in case a\n request is detected that does not use multipart form data but the files\n object is accessed.\n \"\"\"\n oldcls = request.files.__class__\n\n class newcls(oldcls):\n def __getitem__(self, key):\n try:\n return oldcls.__getitem__(self, key)\n except KeyError:\n if key not in request.form:\n raise\n raise DebugFilesKeyError(request, key)\n\n newcls.__name__ = oldcls.__name__\n newcls.__module__ = oldcls.__module__\n request.files.__class__ = newcls\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 81, "name": "newcls", "kind": "def", "category": "class", "info": "__getitem__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 88, "name": "DebugFilesKeyError", "kind": "ref", "category": "function", "info": " raise DebugFilesKeyError(request, key)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 95, "name": "_dump_loader_info", "kind": "def", "category": "function", "info": "def _dump_loader_info(loader) -> t.Generator:\n yield f\"class: {type(loader).__module__}.{type(loader).__name__}\"\n for key, value in sorted(loader.__dict__.items()):\n if key.startswith(\"_\"):\n continue\n if isinstance(value, (tuple, list)):\n if not all(isinstance(x, str) for x in value):\n continue\n yield f\"{key}:\"\n for item in value:\n yield f\" - {item}\"\n continue\n elif not isinstance(value, (str, int, float, bool)):\n continue\n yield f\"{key}: {value!r}\"\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 112, "name": "explain_template_loading_attempts", "kind": "def", "category": "function", "info": "def explain_template_loading_attempts(app: Flask, template, attempts) -> None:\n \"\"\"This should help developers understand what failed\"\"\"\n info = [f\"Locating template {template!r}:\"]\n total_found = 0\n blueprint = None\n reqctx = _request_ctx_stack.top\n if reqctx is not None and reqctx.request.blueprint is not None:\n blueprint = reqctx.request.blueprint\n\n for idx, (loader, srcobj, triple) in enumerate(attempts):\n if isinstance(srcobj, Flask):\n src_info = f\"application {srcobj.import_name!r}\"\n elif isinstance(srcobj, Blueprint):\n src_info = f\"blueprint {srcobj.name!r} ({srcobj.import_name})\"\n else:\n src_info = repr(srcobj)\n\n info.append(f\"{idx + 1:5}: trying loader of {src_info}\")\n\n for line in _dump_loader_info(loader):\n info.append(f\" {line}\")\n\n if triple is None:\n detail = \"no match\"\n else:\n detail = f\"found ({triple[1] or ''!r})\"\n total_found += 1\n info.append(f\" -> {detail}\")\n\n seems_fishy = False\n if total_found == 0:\n info.append(\"Error: the template could not be found.\")\n seems_fishy = 
True\n elif total_found > 1:\n info.append(\"Warning: multiple loaders returned a match for the template.\")\n seems_fishy = True\n\n if blueprint is not None and seems_fishy:\n info.append(\n \" The template was looked up from an endpoint that belongs\"\n f\" to the blueprint {blueprint!r}.\"\n )\n info.append(\" Maybe you did not place a template in the right folder?\")\n info.append(\" See https://flask.palletsprojects.com/blueprints/#templates\")\n\n app.logger.info(\"\\n\".join(info))\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 131, "name": "_dump_loader_info", "kind": "ref", "category": "function", "info": " for line in _dump_loader_info(loader):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 157, "name": "info", "kind": "ref", "category": "function", "info": " app.logger.info(\"\\n\".join(info))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/debughelpers.py", "rel_fname": "src/flask/debughelpers.py", "line": 160, "name": "explain_ignored_app_run", "kind": "def", "category": "function", "info": "def explain_ignored_app_run() -> None:\n if os.environ.get(\"WERKZEUG_RUN_MAIN\") != \"true\":\n warn(\n Warning(\n \"Silently ignoring app.run() because the application is\"\n \" run from the flask command line executable. Consider\"\n ' putting app.run() behind an if __name__ == \"__main__\"'\n \" guard to silence this warning.\"\n ),\n stacklevel=3,\n )\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/globals.py", "rel_fname": "src/flask/globals.py", "line": 29, "name": "_lookup_req_object", "kind": "def", "category": "function", "info": "def _lookup_req_object(name):\n top = _request_ctx_stack.top\n if top is None:\n raise RuntimeError(_request_ctx_err_msg)\n return getattr(top, name)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/globals.py", "rel_fname": "src/flask/globals.py", "line": 36, "name": "_lookup_app_object", "kind": "def", "category": "function", "info": "def _lookup_app_object(name):\n top = _app_ctx_stack.top\n if top is None:\n raise RuntimeError(_app_ctx_err_msg)\n return getattr(top, name)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/globals.py", "rel_fname": "src/flask/globals.py", "line": 43, "name": "_find_app", "kind": "def", "category": "function", "info": "def _find_app():\n top = _app_ctx_stack.top\n if top is None:\n raise RuntimeError(_app_ctx_err_msg)\n return top.app\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 27, "name": "get_env", "kind": "def", "category": "function", "info": "def get_env() -> str:\n \"\"\"Get the environment the app is running in, indicated by the\n :envvar:`FLASK_ENV` environment variable. The default is\n ``'production'``.\n \"\"\"\n return os.environ.get(\"FLASK_ENV\") or \"production\"\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 35, "name": "get_debug_flag", "kind": "def", "category": "function", "info": "def get_debug_flag() -> bool:\n \"\"\"Get whether debug mode should be enabled for the app, indicated\n by the :envvar:`FLASK_DEBUG` environment variable. 
The default is\n ``True`` if :func:`.get_env` returns ``'development'``, or ``False``\n otherwise.\n \"\"\"\n val = os.environ.get(\"FLASK_DEBUG\")\n\n if not val:\n return get_env() == \"development\"\n\n return val.lower() not in (\"0\", \"false\", \"no\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 44, "name": "get_env", "kind": "ref", "category": "function", "info": " return get_env() == \"development\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 49, "name": "get_load_dotenv", "kind": "def", "category": "function", "info": "def get_load_dotenv(default: bool = True) -> bool:\n \"\"\"Get whether the user has disabled loading dotenv files by setting\n :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the\n files.\n\n :param default: What to return if the env var isn't set.\n \"\"\"\n val = os.environ.get(\"FLASK_SKIP_DOTENV\")\n\n if not val:\n return default\n\n return val.lower() in (\"0\", \"false\", \"no\")\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 64, "name": "stream_with_context", "kind": "def", "category": "function", "info": "def stream_with_context(\n generator_or_function: t.Union[t.Generator, t.Callable]\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 104, "name": "decorator", "kind": "def", "category": "function", "info": " def decorator(*args: t.Any, **kwargs: t.Any) -> t.Any:\n gen = generator_or_function(*args, **kwargs) # type: ignore\n return stream_with_context(gen)\n\n return update_wrapper(decorator, generator_or_function) # type: ignore\n\n def generator() -> t.Generator:\n ctx = _request_ctx_stack.top\n if ctx is None:\n raise RuntimeError(\n \"Attempted to stream with context but \"\n \"there was no context in the first place to keep around.\"\n )\n with ctx:\n # Dummy sentinel. Has to be inside the context block or we're\n # not actually keeping the context around.\n yield None\n\n # The try/finally is here so that if someone passes a WSGI level\n # iterator in we're still running the cleanup logic. Generators\n # don't need that because they are closed on their destruction\n # automatically.\n try:\n yield from gen\n finally:\n if hasattr(gen, \"close\"):\n gen.close() # type: ignore\n\n # The trick is to start the generator. Then the code execution runs until\n # the first dummy None is yielded at which point the context was already\n # pushed. This item is discarded. 
Then when the iteration continues the\n # real generator is executed.\n wrapped_g = generator()\n next(wrapped_g)\n return wrapped_g\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 105, "name": "generator_or_function", "kind": "ref", "category": "function", "info": " gen = generator_or_function(*args, **kwargs) # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 106, "name": "stream_with_context", "kind": "ref", "category": "function", "info": " return stream_with_context(gen)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 110, "name": "generator", "kind": "def", "category": "function", "info": " def generator() -> t.Generator:\n ctx = _request_ctx_stack.top\n if ctx is None:\n raise RuntimeError(\n \"Attempted to stream with context but \"\n \"there was no context in the first place to keep around.\"\n )\n with ctx:\n # Dummy sentinel. Has to be inside the context block or we're\n # not actually keeping the context around.\n yield None\n\n # The try/finally is here so that if someone passes a WSGI level\n # iterator in we're still running the cleanup logic. Generators\n # don't need that because they are closed on their destruction\n # automatically.\n try:\n yield from gen\n finally:\n if hasattr(gen, \"close\"):\n gen.close() # type: ignore\n\n # The trick is to start the generator. Then the code execution runs until\n # the first dummy None is yielded at which point the context was already\n # pushed. This item is discarded. Then when the iteration continues the\n # real generator is executed.\n wrapped_g = generator()\n next(wrapped_g)\n return wrapped_g\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 136, "name": "generator", "kind": "ref", "category": "function", "info": " wrapped_g = generator()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 141, "name": "make_response", "kind": "def", "category": "function", "info": "def make_response(*args: t.Any) -> \"Response\":\n \"\"\"Sometimes it is necessary to set additional headers in a view. Because\n views do not have to return response objects but can return a value that\n is converted into a response object by Flask itself, it becomes tricky to\n add headers to it. This function can be called instead of using a return\n and you will get a response object which you can use to attach headers.\n\n If view looked like this and you want to add a new header::\n\n def index():\n return render_template('index.html', foo=42)\n\n You can now do something like this::\n\n def index():\n response = make_response(render_template('index.html', foo=42))\n response.headers['X-Parachutes'] = 'parachutes are cool'\n return response\n\n This function accepts the very same arguments you can return from a\n view function. 
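The canonical use of `stream_with_context` captured above, sketched as a view (an existing `app` is assumed)::

    from flask import Response, request, stream_with_context

    @app.route("/stream")
    def streamed_response():
        def generate():
            # The request context is kept alive by the sentinel trick
            # above, so `request` stays usable while streaming.
            yield f"Hello {request.args.get('name', 'World')}!"

        return Response(stream_with_context(generate()))
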
This for example creates a response with a 404 error\n code::\n\n response = make_response(render_template('not_found.html'), 404)\n\n The other use case of this function is to force the return value of a\n view function into a response which is helpful with view\n decorators::\n\n response = make_response(view_function())\n response.headers['X-Parachutes'] = 'parachutes are cool'\n\n Internally this function does the following things:\n\n - if no arguments are passed, it creates a new response argument\n - if one argument is passed, :meth:`flask.Flask.make_response`\n is invoked with it.\n - if more than one argument is passed, the arguments are passed\n to the :meth:`flask.Flask.make_response` function as tuple.\n\n .. versionadded:: 0.6\n \"\"\"\n if not args:\n return current_app.response_class()\n if len(args) == 1:\n args = args[0]\n return current_app.make_response(args)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 184, "name": "response_class", "kind": "ref", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 187, "name": "make_response", "kind": "ref", "category": "function", "info": " return current_app.make_response(args)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 190, "name": "url_for", "kind": "def", "category": "function", "info": "def url_for(endpoint: str, **values: t.Any) -> str:\n \"\"\"Generates a URL to the given endpoint with the method provided.\n\n Variable arguments that are unknown to the target endpoint are appended\n to the generated URL as query arguments. If the value of a query argument\n is ``None``, the whole pair is skipped. In case blueprints are active\n you can shortcut references to the same blueprint by prefixing the\n local endpoint with a dot (``.``).\n\n This will reference the index function local to the current blueprint::\n\n url_for('.index')\n\n See :ref:`url-building`.\n\n Configuration values ``APPLICATION_ROOT`` and ``SERVER_NAME`` are only used when\n generating URLs outside of a request context.\n\n To integrate applications, :class:`Flask` has a hook to intercept URL build\n errors through :attr:`Flask.url_build_error_handlers`. The `url_for`\n function results in a :exc:`~werkzeug.routing.BuildError` when the current\n app does not have a URL for the given endpoint and values. 
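A short sketch combining the two forms the `make_response` docstring describes, a `(body, status)` pair plus header injection (`app` and the template name are illustrative)::

    from flask import make_response, render_template

    @app.route("/")
    def index():
        # Same tuple forms a view may return: here (body, status_code).
        response = make_response(render_template("index.html", foo=42), 200)
        response.headers["X-Parachutes"] = "parachutes are cool"
        return response
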
When it does, the\n :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if\n it is not ``None``, which can return a string to use as the result of\n `url_for` (instead of `url_for`'s default to raise the\n :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.\n An example::\n\n def external_url_handler(error, endpoint, values):\n \"Looks up an external URL when `url_for` cannot build a URL.\"\n # This is an example of hooking the build_error_handler.\n # Here, lookup_url is some utility function you've built\n # which looks up the endpoint in some external URL registry.\n url = lookup_url(endpoint, **values)\n if url is None:\n # External lookup did not have a URL.\n # Re-raise the BuildError, in context of original traceback.\n exc_type, exc_value, tb = sys.exc_info()\n if exc_value is error:\n raise exc_type(exc_value).with_traceback(tb)\n else:\n raise error\n # url_for will use this result, instead of raising BuildError.\n return url\n\n app.url_build_error_handlers.append(external_url_handler)\n\n Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and\n `endpoint` and `values` are the arguments passed into `url_for`. Note\n that this is for building URLs outside the current application, and not for\n handling 404 NotFound errors.\n\n .. versionadded:: 0.10\n The `_scheme` parameter was added.\n\n .. versionadded:: 0.9\n The `_anchor` and `_method` parameters were added.\n\n .. versionadded:: 0.9\n Calls :meth:`Flask.handle_build_error` on\n :exc:`~werkzeug.routing.BuildError`.\n\n :param endpoint: the endpoint of the URL (name of the function)\n :param values: the variable arguments of the URL rule\n :param _external: if set to ``True``, an absolute URL is generated. Server\n address can be changed via ``SERVER_NAME`` configuration variable which\n falls back to the `Host` header, then to the IP and port of the request.\n :param _scheme: a string specifying the desired URL scheme. The `_external`\n parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default\n behavior uses the same scheme as the current request, or\n :data:`PREFERRED_URL_SCHEME` if no request context is available.\n This also can be set to an empty string to build protocol-relative\n URLs.\n :param _anchor: if provided this is added as anchor to the URL.\n :param _method: if provided this explicitly specifies an HTTP method.\n \"\"\"\n appctx = _app_ctx_stack.top\n reqctx = _request_ctx_stack.top\n\n if appctx is None:\n raise RuntimeError(\n \"Attempted to generate a URL without the application context being\"\n \" pushed. This has to be executed when application context is\"\n \" available.\"\n )\n\n # If request specific information is available we have some extra\n # features that support \"relative\" URLs.\n if reqctx is not None:\n url_adapter = reqctx.url_adapter\n blueprint_name = request.blueprint\n\n if endpoint[:1] == \".\":\n if blueprint_name is not None:\n endpoint = f\"{blueprint_name}{endpoint}\"\n else:\n endpoint = endpoint[1:]\n\n external = values.pop(\"_external\", False)\n\n # Otherwise go with the url adapter from the appctx and make\n # the URLs external by default.\n else:\n url_adapter = appctx.url_adapter\n\n if url_adapter is None:\n raise RuntimeError(\n \"Application was not able to create a URL adapter for request\"\n \" independent URL generation. 
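A sketch of request-independent URL building as the `url_for` docstring above describes it, assuming `SERVER_NAME` is configured (endpoint and values are illustrative)::

    from flask import Flask, url_for

    app = Flask(__name__)
    app.config["SERVER_NAME"] = "example.com"  # enables building without a request

    @app.route("/user/<name>")
    def profile(name):
        return name

    with app.app_context():
        # Outside a request context URLs are external by default, and
        # unknown values ("page") are appended as query arguments.
        assert url_for("profile", name="alice", page=2) == (
            "http://example.com/user/alice?page=2"
        )
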
You might be able to fix this by\"\n \" setting the SERVER_NAME config variable.\"\n )\n\n external = values.pop(\"_external\", True)\n\n anchor = values.pop(\"_anchor\", None)\n method = values.pop(\"_method\", None)\n scheme = values.pop(\"_scheme\", None)\n appctx.app.inject_url_defaults(endpoint, values)\n\n # This is not the best way to deal with this but currently the\n # underlying Werkzeug router does not support overriding the scheme on\n # a per build call basis.\n old_scheme = None\n if scheme is not None:\n if not external:\n raise ValueError(\"When specifying _scheme, _external must be True\")\n old_scheme = url_adapter.url_scheme\n url_adapter.url_scheme = scheme\n\n try:\n try:\n rv = url_adapter.build(\n endpoint, values, method=method, force_external=external\n )\n finally:\n if old_scheme is not None:\n url_adapter.url_scheme = old_scheme\n except BuildError as error:\n # We need to inject the values again so that the app callback can\n # deal with that sort of stuff.\n values[\"_external\"] = external\n values[\"_anchor\"] = anchor\n values[\"_method\"] = method\n values[\"_scheme\"] = scheme\n return appctx.app.handle_url_build_error(error, endpoint, values)\n\n if anchor is not None:\n rv += f\"#{url_quote(anchor)}\"\n return rv\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 307, "name": "inject_url_defaults", "kind": "ref", "category": "function", "info": " appctx.app.inject_url_defaults(endpoint, values)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 321, "name": "build", "kind": "ref", "category": "function", "info": " rv = url_adapter.build(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 334, "name": "handle_url_build_error", "kind": "ref", "category": "function", "info": " return appctx.app.handle_url_build_error(error, endpoint, values)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 337, "name": "url_quote", "kind": "ref", "category": "function", "info": " rv += f\"#{url_quote(anchor)}\"\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 341, "name": "get_template_attribute", "kind": "def", "category": "function", "info": "def get_template_attribute(template_name: str, attribute: str) -> t.Any:\n \"\"\"Loads a macro (or variable) a template exports. This can be used to\n invoke a macro from within Python code. If you for example have a\n template named :file:`_cider.html` with the following contents:\n\n .. sourcecode:: html+jinja\n\n {% macro hello(name) %}Hello {{ name }}!{% endmacro %}\n\n You can access this from Python code like this::\n\n hello = get_template_attribute('_cider.html', 'hello')\n return hello('World')\n\n .. 
versionadded:: 0.2\n\n :param template_name: the name of the template\n :param attribute: the name of the variable of macro to access\n \"\"\"\n return getattr(current_app.jinja_env.get_template(template_name).module, attribute)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 360, "name": "get_template", "kind": "ref", "category": "function", "info": " return getattr(current_app.jinja_env.get_template(template_name).module, attribute)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 363, "name": "flash", "kind": "def", "category": "function", "info": "def flash(message: str, category: str = \"message\") -> None:\n \"\"\"Flashes a message to the next request. In order to remove the\n flashed message from the session and to display it to the user,\n the template has to call :func:`get_flashed_messages`.\n\n .. versionchanged:: 0.3\n `category` parameter added.\n\n :param message: the message to be flashed.\n :param category: the category for the message. The following values\n are recommended: ``'message'`` for any kind of message,\n ``'error'`` for errors, ``'info'`` for information\n messages and ``'warning'`` for warnings. However any\n kind of string can be used as category.\n \"\"\"\n # Original implementation:\n #\n # session.setdefault('_flashes', []).append((category, message))\n #\n # This assumed that changes made to mutable structures in the session are\n # always in sync with the session object, which is not true for session\n # implementations that use external storage for keeping their keys/values.\n flashes = session.get(\"_flashes\", [])\n flashes.append((category, message))\n session[\"_flashes\"] = flashes\n message_flashed.send(\n current_app._get_current_object(), # type: ignore\n message=message,\n category=category,\n )\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 388, "name": "send", "kind": "ref", "category": "function", "info": " message_flashed.send(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 389, "name": "_get_current_object", "kind": "ref", "category": "function", "info": " current_app._get_current_object(), # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 395, "name": "get_flashed_messages", "kind": "def", "category": "function", "info": "def get_flashed_messages(\n with_categories: bool = False, category_filter: t.Iterable[str] = ()\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 438, "name": "_prepare_send_file_kwargs", "kind": "def", "category": "function", "info": "def _prepare_send_file_kwargs(\n download_name: t.Optional[str] = None,\n attachment_filename: t.Optional[str] = None,\n etag: t.Optional[t.Union[bool, str]] = None,\n add_etags: t.Optional[t.Union[bool]] = None,\n max_age: t.Optional[\n t.Union[int, t.Callable[[t.Optional[str]], t.Optional[int]]]\n ] = None,\n cache_timeout: t.Optional[int] = None,\n **kwargs: t.Any,\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 609, "name": 
"_prepare_send_file_kwargs", "kind": "ref", "category": "function", "info": " **_prepare_send_file_kwargs(\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 697, "name": "_prepare_send_file_kwargs", "kind": "ref", "category": "function", "info": " directory, path, **_prepare_send_file_kwargs(**kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 701, "name": "get_root_path", "kind": "def", "category": "function", "info": "def get_root_path(import_name: str) -> str:\n \"\"\"Find the root path of a package, or the path that contains a\n module. If it cannot be found, returns the current working\n directory.\n\n Not to be confused with the value returned by :func:`find_package`.\n\n :meta private:\n \"\"\"\n # Module already imported and has a file attribute. Use that first.\n mod = sys.modules.get(import_name)\n\n if mod is not None and hasattr(mod, \"__file__\"):\n return os.path.dirname(os.path.abspath(mod.__file__))\n\n # Next attempt: check the loader.\n loader = pkgutil.get_loader(import_name)\n\n # Loader does not exist or we're referring to an unloaded main\n # module or a main module without path (interactive sessions), go\n # with the current working directory.\n if loader is None or import_name == \"__main__\":\n return os.getcwd()\n\n if hasattr(loader, \"get_filename\"):\n filepath = loader.get_filename(import_name) # type: ignore\n else:\n # Fall back to imports.\n __import__(import_name)\n mod = sys.modules[import_name]\n filepath = getattr(mod, \"__file__\", None)\n\n # If we don't have a file path it might be because it is a\n # namespace package. In this case pick the root path from the\n # first module that is contained in the package.\n if filepath is None:\n raise RuntimeError(\n \"No root path can be found for the provided module\"\n f\" {import_name!r}. 
This can happen because the module\"\n \" came from an import hook that does not provide file\"\n \" name information or because it's a namespace package.\"\n \" In this case the root path needs to be explicitly\"\n \" provided.\"\n )\n\n # filepath is import_name.py for a module, or __init__.py for a package.\n return os.path.dirname(os.path.abspath(filepath))\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 714, "name": "dirname", "kind": "ref", "category": "function", "info": " return os.path.dirname(os.path.abspath(mod.__file__))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 714, "name": "abspath", "kind": "ref", "category": "function", "info": " return os.path.dirname(os.path.abspath(mod.__file__))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 726, "name": "get_filename", "kind": "ref", "category": "function", "info": " filepath = loader.get_filename(import_name) # type: ignore\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 747, "name": "dirname", "kind": "ref", "category": "function", "info": " return os.path.dirname(os.path.abspath(filepath))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 747, "name": "abspath", "kind": "ref", "category": "function", "info": " return os.path.dirname(os.path.abspath(filepath))\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 750, "name": "locked_cached_property", "kind": "def", "category": "class", "info": "__init__\t__get__\t__set__\t__delete__"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 775, "name": "__set__", "kind": "def", "category": "function", "info": " def __set__(self, obj: object, value: t.Any) -> None:\n with self.lock:\n super().__set__(obj, value)\n\n def __delete__(self, obj: object) -> None:\n with self.lock:\n super().__delete__(obj)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 777, "name": "__set__", "kind": "ref", "category": "function", "info": " super().__set__(obj, value)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 779, "name": "__delete__", "kind": "def", "category": "function", "info": " def __delete__(self, obj: object) -> None:\n with self.lock:\n super().__delete__(obj)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 781, "name": "__delete__", "kind": "ref", "category": "function", "info": " super().__delete__(obj)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/helpers.py", "rel_fname": "src/flask/helpers.py", "line": 805, "name": "is_ip", "kind": "def", "category": "function", "info": "def is_ip(value: str) -> bool:\n \"\"\"Determine if the given string is an IP address.\n\n :param value: value to check\n :type value: str\n\n :return: True if string is an IP address\n :rtype: bool\n 
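A sketch of `locked_cached_property` as the `__set__`/`__delete__` entries above suggest it is used (the `Service` class is illustrative, not from Flask)::

    from flask.helpers import locked_cached_property

    class Service:
        @locked_cached_property
        def connection(self):
            # Computed once per instance; the property's lock serializes
            # concurrent get/set/delete access across threads.
            return object()
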
\"\"\"\n for family in (socket.AF_INET, socket.AF_INET6):\n try:\n socket.inet_pton(family, value)\n except OSError:\n pass\n else:\n return True\n\n return False\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 24, "name": "JSONEncoder", "kind": "def", "category": "class", "info": "default"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 41, "name": "default", "kind": "def", "category": "function", "info": " def default(self, o: t.Any) -> t.Any:\n \"\"\"Convert ``o`` to a JSON serializable type. See\n :meth:`json.JSONEncoder.default`. Python does not support\n overriding how basic types like ``str`` or ``list`` are\n serialized, they are handled before this method.\n \"\"\"\n if isinstance(o, date):\n return http_date(o)\n if isinstance(o, uuid.UUID):\n return str(o)\n if dataclasses and dataclasses.is_dataclass(o):\n return dataclasses.asdict(o)\n if hasattr(o, \"__html__\"):\n return str(o.__html__())\n return super().default(o)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 54, "name": "__html__", "kind": "ref", "category": "function", "info": " return str(o.__html__())\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 55, "name": "default", "kind": "ref", "category": "function", "info": " return super().default(o)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 58, "name": "JSONDecoder", "kind": "def", "category": "class", "info": ""}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 69, "name": "_dump_arg_defaults", "kind": "def", "category": "function", "info": "def _dump_arg_defaults(\n kwargs: t.Dict[str, t.Any], app: t.Optional[\"Flask\"] = None\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 90, "name": "_load_arg_defaults", "kind": "def", "category": "function", "info": "def _load_arg_defaults(\n kwargs: t.Dict[str, t.Any], app: t.Optional[\"Flask\"] = None\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 108, "name": "dumps", "kind": "def", "category": "function", "info": "def dumps(obj: t.Any, app: t.Optional[\"Flask\"] = None, **kwargs: t.Any) -> str:\n \"\"\"Serialize an object to a string of JSON.\n\n Takes the same arguments as the built-in :func:`json.dumps`, with\n some defaults from application configuration.\n\n :param obj: Object to serialize to JSON.\n :param app: Use this app's config instead of the active app context\n or defaults.\n :param kwargs: Extra arguments passed to :func:`json.dumps`.\n\n .. versionchanged:: 2.0\n ``encoding`` is deprecated and will be removed in Flask 2.1.\n\n .. 
versionchanged:: 1.0.3\n ``app`` can be passed directly, rather than requiring an app\n context for configuration.\n \"\"\"\n _dump_arg_defaults(kwargs, app=app)\n encoding = kwargs.pop(\"encoding\", None)\n rv = _json.dumps(obj, **kwargs)\n\n if encoding is not None:\n warnings.warn(\n \"'encoding' is deprecated and will be removed in Flask 2.1.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if isinstance(rv, str):\n return rv.encode(encoding) # type: ignore\n\n return rv\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 126, "name": "_dump_arg_defaults", "kind": "ref", "category": "function", "info": " _dump_arg_defaults(kwargs, app=app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 128, "name": "dumps", "kind": "ref", "category": "function", "info": " rv = _json.dumps(obj, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 143, "name": "dump", "kind": "def", "category": "function", "info": "def dump(\n obj: t.Any, fp: t.IO[str], app: t.Optional[\"Flask\"] = None, **kwargs: t.Any\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 161, "name": "_dump_arg_defaults", "kind": "ref", "category": "function", "info": " _dump_arg_defaults(kwargs, app=app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 166, "name": "write", "kind": "ref", "category": "function", "info": " fp.write(\"\")\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 179, "name": "dump", "kind": "ref", "category": "function", "info": " _json.dump(obj, fp, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 182, "name": "loads", "kind": "def", "category": "function", "info": "def loads(s: str, app: t.Optional[\"Flask\"] = None, **kwargs: t.Any) -> t.Any:\n \"\"\"Deserialize an object from a string of JSON.\n\n Takes the same arguments as the built-in :func:`json.loads`, with\n some defaults from application configuration.\n\n :param s: JSON string to deserialize.\n :param app: Use this app's config instead of the active app context\n or defaults.\n :param kwargs: Extra arguments passed to :func:`json.loads`.\n\n .. versionchanged:: 2.0\n ``encoding`` is deprecated and will be removed in Flask 2.1. The\n data must be a string or UTF-8 bytes.\n\n .. 
versionchanged:: 1.0.3\n ``app`` can be passed directly, rather than requiring an app\n context for configuration.\n \"\"\"\n _load_arg_defaults(kwargs, app=app)\n encoding = kwargs.pop(\"encoding\", None)\n\n if encoding is not None:\n warnings.warn(\n \"'encoding' is deprecated and will be removed in Flask 2.1.\"\n \" The data must be a string or UTF-8 bytes.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if isinstance(s, bytes):\n s = s.decode(encoding)\n\n return _json.loads(s, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 201, "name": "_load_arg_defaults", "kind": "ref", "category": "function", "info": " _load_arg_defaults(kwargs, app=app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 213, "name": "decode", "kind": "ref", "category": "function", "info": " s = s.decode(encoding)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 215, "name": "loads", "kind": "ref", "category": "function", "info": " return _json.loads(s, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 218, "name": "load", "kind": "def", "category": "function", "info": "def load(fp: t.IO[str], app: t.Optional[\"Flask\"] = None, **kwargs: t.Any) -> t.Any:\n \"\"\"Deserialize an object from JSON read from a file object.\n\n Takes the same arguments as the built-in :func:`json.load`, with\n some defaults from application configuration.\n\n :param fp: File object to read JSON from.\n :param app: Use this app's config instead of the active app context\n or defaults.\n :param kwargs: Extra arguments passed to :func:`json.load`.\n\n .. versionchanged:: 2.0\n ``encoding`` is deprecated and will be removed in Flask 2.1. 
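A round-trip sketch of the `dumps`/`loads` pair above, passing `app` directly instead of relying on an active app context (the 1.0.3 behavior noted in both docstrings)::

    from flask import Flask, json

    app = Flask(__name__)

    s = json.dumps({"b": 1, "a": 2}, app=app)   # honors app JSON config
    assert json.loads(s, app=app) == {"b": 1, "a": 2}
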
The\n file must be text mode, or binary mode with UTF-8 bytes.\n \"\"\"\n _load_arg_defaults(kwargs, app=app)\n encoding = kwargs.pop(\"encoding\", None)\n\n if encoding is not None:\n warnings.warn(\n \"'encoding' is deprecated and will be removed in Flask 2.1.\"\n \" The file must be text mode, or binary mode with UTF-8\"\n \" bytes.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if isinstance(fp.read(0), bytes):\n fp = io.TextIOWrapper(fp, encoding) # type: ignore\n\n return _json.load(fp, **kwargs)\n\n\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 233, "name": "_load_arg_defaults", "kind": "ref", "category": "function", "info": " _load_arg_defaults(kwargs, app=app)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 245, "name": "read", "kind": "ref", "category": "function", "info": " if isinstance(fp.read(0), bytes):\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 248, "name": "load", "kind": "ref", "category": "function", "info": " return _json.load(fp, **kwargs)\n"}, {"fname": "playground/ab271214-24fb-4966-b9c3-db2e03940d42/flask/src/flask/json/__init__.py", "rel_fname": "src/flask/json/__init__.py", "line": 251, "name": "htmlsafe_dumps", "kind": "def", "category": "function", "info": "def htmlsafe_dumps(obj: t.Any, **kwargs: t.Any) -> str:\n \"\"\"Serialize an object to a string of JSON with :func:`dumps`, then\n replace HTML-unsafe characters with Unicode escapes and mark the\n result safe with :class:`~markupsafe.Markup`.\n\n This is available in templates as the ``|tojson`` filter.\n\n The returned string is safe to render in HTML documents and\n ``
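A sketch of the `|tojson` filter that `htmlsafe_dumps` backs, per the (truncated) docstring above; rendering with only an app context is assumed to suffice here::

    from flask import Flask, render_template_string

    app = Flask(__name__)

    with app.app_context():
        # <, >, & and ' are replaced with \uNNNN escapes, so the result
        # can be embedded directly inside a <script> block.
        html = render_template_string(
            "<script>const user = {{ u|tojson }};</script>",
            u={"name": "</script>"},
        )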